source
stringlengths
3
92
c
stringlengths
26
2.25M
b3ld.c
/**** BSIM3v3.3.0, Released by Xuemei Xi 07/29/2005 ****/ /**** OpenMP support for ngspice by Holger Vogt 06/28/2010 ****/ /********** * Copyright 2004 Regents of the University of California. All rights reserved. * File: b3ld.c of BSIM3v3.3.0 * Author: 1991 JianHui Huang and Min-Chie Jeng. * Modified by Mansun Chan (1995). * Author: 1997-1999 Weidong Liu. * Author: 2001 Xuemei Xi * Modified by Xuemei Xi, 10/05, 12/21, 2001. * Modified by Xuemei Xi, 07/29/2005. **********/ #include "ngspice/ngspice.h" #include "ngspice/cktdefs.h" #include "bsim3def.h" #include "ngspice/trandefs.h" #include "ngspice/const.h" #include "ngspice/sperror.h" #include "ngspice/devdefs.h" #include "ngspice/suffix.h" #define MAX_EXP 5.834617425e14 #define MIN_EXP 1.713908431e-15 #define EXP_THRESHOLD 34.0 #define EPSOX 3.453133e-11 #define EPSSI 1.03594e-10 #define Charge_q 1.60219e-19 #define DELTA_1 0.02 #define DELTA_2 0.02 #define DELTA_3 0.02 #define DELTA_4 0.02 #ifdef USE_OMP int BSIM3LoadOMP(BSIM3instance *here, CKTcircuit *ckt); void BSIM3LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt); #endif int BSIM3load( GENmodel *inModel, CKTcircuit *ckt) { #ifdef USE_OMP int idx; BSIM3model *model = (BSIM3model*)inModel; int error = 0; BSIM3instance **InstArray; InstArray = model->BSIM3InstanceArray; #pragma omp parallel for for (idx = 0; idx < model->BSIM3InstCount; idx++) { BSIM3instance *here = InstArray[idx]; int local_error = BSIM3LoadOMP(here, ckt); if (local_error) error = local_error; } BSIM3LoadRhsMat(inModel, ckt); return error; } int BSIM3LoadOMP(BSIM3instance *here, CKTcircuit *ckt) { BSIM3model *model = BSIM3modPtr(here); #else BSIM3model *model = (BSIM3model*)inModel; BSIM3instance *here; #endif double SourceSatCurrent, DrainSatCurrent; double ag0, qgd, qgs, qgb, von, cbhat, VgstNVt, ExpVgst; double cdrain, cdhat, cdreq, ceqbd, ceqbs, ceqqb, ceqqd, ceqqg, ceq, geq; double czbd, czbdsw, czbdswg, czbs, czbssw, czbsswg, evbd, evbs, arg, sarg; double delvbd, delvbs, delvds, delvgd, 
delvgs; double Vfbeff, dVfbeff_dVg, dVfbeff_dVb, V3, V4; double gcbdb, gcbgb, gcbsb, gcddb, gcdgb, gcdsb, gcgdb, gcggb, gcgsb, gcsdb; #ifndef NEWCONV double tol; #endif double gcsgb, gcssb, MJ, MJSW, MJSWG; double vbd, vbs, vds, vgb, vgd, vgs, vgdo; #ifndef PREDICTOR double xfact; #endif double qgate=0.0, qbulk=0.0, qdrn=0.0, qsrc, qinoi, cqgate, cqbulk, cqdrn; double Vds, Vgs, Vbs, Gmbs, FwdSum, RevSum; double Vgs_eff, Vfb; double Phis, dPhis_dVb, sqrtPhis, dsqrtPhis_dVb, Vth, dVth_dVb, dVth_dVd; double Vgst, dVgst_dVg, dVgst_dVb, dVgs_eff_dVg, Nvtm; double Vtm; double n, dn_dVb, dn_dVd, voffcv, noff, dnoff_dVd, dnoff_dVb; double ExpArg, V0, CoxWLcen, QovCox, LINK; double DeltaPhi, dDeltaPhi_dVg, VgDP, dVgDP_dVg; double Cox, Tox, Tcen, dTcen_dVg, dTcen_dVd, dTcen_dVb; double Ccen, Coxeff, dCoxeff_dVg, dCoxeff_dVd, dCoxeff_dVb; double Denomi, dDenomi_dVg, dDenomi_dVd, dDenomi_dVb; double ueff, dueff_dVg, dueff_dVd, dueff_dVb; double Esat, Vdsat; double EsatL, dEsatL_dVg, dEsatL_dVd, dEsatL_dVb; double dVdsat_dVg, dVdsat_dVb, dVdsat_dVd, Vasat, dAlphaz_dVg, dAlphaz_dVb; double dVasat_dVg, dVasat_dVb, dVasat_dVd, Va, dVa_dVd, dVa_dVg, dVa_dVb; double Vbseff, dVbseff_dVb, VbseffCV, dVbseffCV_dVb; double Arg1, One_Third_CoxWL, Two_Third_CoxWL, Alphaz, CoxWL; double T0, dT0_dVg, dT0_dVd, dT0_dVb; double T1, dT1_dVg, dT1_dVd, dT1_dVb; double T2, dT2_dVg, dT2_dVd, dT2_dVb; double T3, dT3_dVg, dT3_dVd, dT3_dVb; double T4; double T5; double T6; double T7; double T8; double T9; double T10; double T11, T12; double tmp, Abulk, dAbulk_dVb, Abulk0, dAbulk0_dVb; double VACLM, dVACLM_dVg, dVACLM_dVd, dVACLM_dVb; double VADIBL, dVADIBL_dVg, dVADIBL_dVd, dVADIBL_dVb; double Xdep, dXdep_dVb, lt1, dlt1_dVb, ltw, dltw_dVb, Delt_vth, dDelt_vth_dVb; double Theta0, dTheta0_dVb; double TempRatio, tmp1, tmp2, tmp3, tmp4; double DIBL_Sft, dDIBL_Sft_dVd, Lambda, dLambda_dVg; double Idtot, Ibtot; #ifndef NOBYPASS double tempv; #endif double a1, ScalingFactor; double Vgsteff, dVgsteff_dVg, 
dVgsteff_dVd, dVgsteff_dVb; double Vdseff, dVdseff_dVg, dVdseff_dVd, dVdseff_dVb; double VdseffCV, dVdseffCV_dVg, dVdseffCV_dVd, dVdseffCV_dVb; double diffVds, dAbulk_dVg; double beta, dbeta_dVg, dbeta_dVd, dbeta_dVb; double gche, dgche_dVg, dgche_dVd, dgche_dVb; double fgche1, dfgche1_dVg, dfgche1_dVd, dfgche1_dVb; double fgche2, dfgche2_dVg, dfgche2_dVd, dfgche2_dVb; double Idl, dIdl_dVg, dIdl_dVd, dIdl_dVb; double Idsa, dIdsa_dVg, dIdsa_dVd, dIdsa_dVb; double Ids, Gm, Gds, Gmb; double Isub, Gbd, Gbg, Gbb; double VASCBE, dVASCBE_dVg, dVASCBE_dVd, dVASCBE_dVb; double CoxWovL; double Rds, dRds_dVg, dRds_dVb, WVCox, WVCoxRds; double Vgst2Vtm, VdsatCV, dVdsatCV_dVg, dVdsatCV_dVb; double Leff, Weff, dWeff_dVg, dWeff_dVb; double AbulkCV, dAbulkCV_dVb; double qgdo, qgso, cgdo, cgso; double qcheq=0.0, qdef, gqdef=0.0, cqdef, cqcheq, gtau_diff, gtau_drift; double gcqdb=0.0,gcqsb=0.0,gcqgb=0.0,gcqbb=0.0; double dxpart, sxpart, ggtg, ggtd, ggts, ggtb; double ddxpart_dVd, ddxpart_dVg, ddxpart_dVb, ddxpart_dVs; double dsxpart_dVd, dsxpart_dVg, dsxpart_dVb, dsxpart_dVs; double gbspsp, gbbdp, gbbsp, gbspg, gbspb, gbspdp; double gbdpdp, gbdpg, gbdpb, gbdpsp; double Cgg, Cgd, Cgb, Cdg, Cdd, Cds; double Csg, Csd, Css, Csb, Cbg, Cbd, Cbb; double Cgg1, Cgb1, Cgd1, Cbg1, Cbb1, Cbd1, Qac0, Qsub0; double dQac0_dVg, dQac0_dVb, dQsub0_dVg, dQsub0_dVd, dQsub0_dVb; double m; struct bsim3SizeDependParam *pParam; int ByPass, Check, ChargeComputationNeeded, error; /* double junk[50]; */ ScalingFactor = 1.0e-9; ChargeComputationNeeded = ((ckt->CKTmode & (MODEDCTRANCURVE | MODEAC | MODETRAN | MODEINITSMSIG)) || ((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC))) ? 
1 : 0; #ifndef USE_OMP for (; model != NULL; model = BSIM3nextModel(model)) { for (here = BSIM3instances(model); here != NULL; here = BSIM3nextInstance(here)) { #endif Check = 1; ByPass = 0; pParam = here->pParam; if ((ckt->CKTmode & MODEINITSMSIG)) { vbs = *(ckt->CKTstate0 + here->BSIM3vbs); vgs = *(ckt->CKTstate0 + here->BSIM3vgs); vds = *(ckt->CKTstate0 + here->BSIM3vds); qdef = *(ckt->CKTstate0 + here->BSIM3qdef); } else if ((ckt->CKTmode & MODEINITTRAN)) { vbs = *(ckt->CKTstate1 + here->BSIM3vbs); vgs = *(ckt->CKTstate1 + here->BSIM3vgs); vds = *(ckt->CKTstate1 + here->BSIM3vds); qdef = *(ckt->CKTstate1 + here->BSIM3qdef); } else if ((ckt->CKTmode & MODEINITJCT) && !here->BSIM3off) { vds = model->BSIM3type * here->BSIM3icVDS; vgs = model->BSIM3type * here->BSIM3icVGS; vbs = model->BSIM3type * here->BSIM3icVBS; qdef = 0.0; if ((vds == 0.0) && (vgs == 0.0) && (vbs == 0.0) && ((ckt->CKTmode & (MODETRAN | MODEAC|MODEDCOP | MODEDCTRANCURVE)) || (!(ckt->CKTmode & MODEUIC)))) { vbs = 0.0; vgs = model->BSIM3type * here->BSIM3vth0 + 0.1; vds = 0.1; } } else if ((ckt->CKTmode & (MODEINITJCT | MODEINITFIX)) && (here->BSIM3off)) { qdef = vbs = vgs = vds = 0.0; } else { #ifndef PREDICTOR if ((ckt->CKTmode & MODEINITPRED)) { xfact = ckt->CKTdelta / ckt->CKTdeltaOld[1]; *(ckt->CKTstate0 + here->BSIM3vbs) = *(ckt->CKTstate1 + here->BSIM3vbs); vbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM3vbs)) - (xfact * (*(ckt->CKTstate2 + here->BSIM3vbs))); *(ckt->CKTstate0 + here->BSIM3vgs) = *(ckt->CKTstate1 + here->BSIM3vgs); vgs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM3vgs)) - (xfact * (*(ckt->CKTstate2 + here->BSIM3vgs))); *(ckt->CKTstate0 + here->BSIM3vds) = *(ckt->CKTstate1 + here->BSIM3vds); vds = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM3vds)) - (xfact * (*(ckt->CKTstate2 + here->BSIM3vds))); *(ckt->CKTstate0 + here->BSIM3vbd) = *(ckt->CKTstate0 + here->BSIM3vbs) - *(ckt->CKTstate0 + here->BSIM3vds); *(ckt->CKTstate0 + here->BSIM3qdef) = *(ckt->CKTstate1 + 
here->BSIM3qdef); qdef = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM3qdef)) -(xfact * (*(ckt->CKTstate2 + here->BSIM3qdef))); } else { #endif /* PREDICTOR */ vbs = model->BSIM3type * (*(ckt->CKTrhsOld + here->BSIM3bNode) - *(ckt->CKTrhsOld + here->BSIM3sNodePrime)); vgs = model->BSIM3type * (*(ckt->CKTrhsOld + here->BSIM3gNode) - *(ckt->CKTrhsOld + here->BSIM3sNodePrime)); vds = model->BSIM3type * (*(ckt->CKTrhsOld + here->BSIM3dNodePrime) - *(ckt->CKTrhsOld + here->BSIM3sNodePrime)); qdef = model->BSIM3type * (*(ckt->CKTrhsOld + here->BSIM3qNode)); #ifndef PREDICTOR } #endif /* PREDICTOR */ vbd = vbs - vds; vgd = vgs - vds; vgdo = *(ckt->CKTstate0 + here->BSIM3vgs) - *(ckt->CKTstate0 + here->BSIM3vds); delvbs = vbs - *(ckt->CKTstate0 + here->BSIM3vbs); delvbd = vbd - *(ckt->CKTstate0 + here->BSIM3vbd); delvgs = vgs - *(ckt->CKTstate0 + here->BSIM3vgs); delvds = vds - *(ckt->CKTstate0 + here->BSIM3vds); delvgd = vgd - vgdo; if (here->BSIM3mode >= 0) { Idtot = here->BSIM3cd + here->BSIM3csub - here->BSIM3cbd; cdhat = Idtot - here->BSIM3gbd * delvbd + (here->BSIM3gmbs + here->BSIM3gbbs) * delvbs + (here->BSIM3gm + here->BSIM3gbgs) * delvgs + (here->BSIM3gds + here->BSIM3gbds) * delvds; Ibtot = here->BSIM3cbs + here->BSIM3cbd - here->BSIM3csub; cbhat = Ibtot + here->BSIM3gbd * delvbd + (here->BSIM3gbs - here->BSIM3gbbs) * delvbs - here->BSIM3gbgs * delvgs - here->BSIM3gbds * delvds; } else { Idtot = here->BSIM3cd - here->BSIM3cbd; cdhat = Idtot - (here->BSIM3gbd - here->BSIM3gmbs) * delvbd + here->BSIM3gm * delvgd - here->BSIM3gds * delvds; Ibtot = here->BSIM3cbs + here->BSIM3cbd - here->BSIM3csub; cbhat = Ibtot + here->BSIM3gbs * delvbs + (here->BSIM3gbd - here->BSIM3gbbs) * delvbd - here->BSIM3gbgs * delvgd + here->BSIM3gbds * delvds; } #ifndef NOBYPASS /* following should be one big if connected by && all over * the place, but some C compilers can't handle that, so * we split it up here to let them digest it in stages */ if ((!(ckt->CKTmode & MODEINITPRED)) && 
(ckt->CKTbypass)) if ((fabs(delvbs) < (ckt->CKTreltol * MAX(fabs(vbs), fabs(*(ckt->CKTstate0+here->BSIM3vbs))) + ckt->CKTvoltTol))) if ((fabs(delvbd) < (ckt->CKTreltol * MAX(fabs(vbd), fabs(*(ckt->CKTstate0+here->BSIM3vbd))) + ckt->CKTvoltTol))) if ((fabs(delvgs) < (ckt->CKTreltol * MAX(fabs(vgs), fabs(*(ckt->CKTstate0+here->BSIM3vgs))) + ckt->CKTvoltTol))) if ((fabs(delvds) < (ckt->CKTreltol * MAX(fabs(vds), fabs(*(ckt->CKTstate0+here->BSIM3vds))) + ckt->CKTvoltTol))) if ((fabs(cdhat - Idtot) < ckt->CKTreltol * MAX(fabs(cdhat),fabs(Idtot)) + ckt->CKTabstol)) { tempv = MAX(fabs(cbhat),fabs(Ibtot)) + ckt->CKTabstol; if ((fabs(cbhat - Ibtot)) < ckt->CKTreltol * tempv) { /* bypass code */ vbs = *(ckt->CKTstate0 + here->BSIM3vbs); vbd = *(ckt->CKTstate0 + here->BSIM3vbd); vgs = *(ckt->CKTstate0 + here->BSIM3vgs); vds = *(ckt->CKTstate0 + here->BSIM3vds); qdef = *(ckt->CKTstate0 + here->BSIM3qdef); vgd = vgs - vds; vgb = vgs - vbs; cdrain = here->BSIM3cd; if ((ckt->CKTmode & (MODETRAN | MODEAC)) || ((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC))) { ByPass = 1; qgate = here->BSIM3qgate; qbulk = here->BSIM3qbulk; qdrn = here->BSIM3qdrn; goto line755; } else { goto line850; } } } #endif /*NOBYPASS*/ von = here->BSIM3von; if (*(ckt->CKTstate0 + here->BSIM3vds) >= 0.0) { vgs = DEVfetlim(vgs, *(ckt->CKTstate0+here->BSIM3vgs), von); vds = vgs - vgd; vds = DEVlimvds(vds, *(ckt->CKTstate0 + here->BSIM3vds)); vgd = vgs - vds; } else { vgd = DEVfetlim(vgd, vgdo, von); vds = vgs - vgd; vds = -DEVlimvds(-vds, -(*(ckt->CKTstate0+here->BSIM3vds))); vgs = vgd + vds; } if (vds >= 0.0) { vbs = DEVpnjlim(vbs, *(ckt->CKTstate0 + here->BSIM3vbs), CONSTvt0, model->BSIM3vcrit, &Check); vbd = vbs - vds; } else { vbd = DEVpnjlim(vbd, *(ckt->CKTstate0 + here->BSIM3vbd), CONSTvt0, model->BSIM3vcrit, &Check); vbs = vbd + vds; } } /* determine DC current and derivatives */ vbd = vbs - vds; vgd = vgs - vds; vgb = vgs - vbs; /* Source/drain junction diode DC model begins */ Nvtm = 
model->BSIM3vtm * model->BSIM3jctEmissionCoeff; /* acm model */ if (model->BSIM3acmMod == 0) { if ((here->BSIM3sourceArea <= 0.0) && (here->BSIM3sourcePerimeter <= 0.0)) { SourceSatCurrent = 1.0e-14; } else { SourceSatCurrent = here->BSIM3sourceArea * model->BSIM3jctTempSatCurDensity + here->BSIM3sourcePerimeter * model->BSIM3jctSidewallTempSatCurDensity; } if ((here->BSIM3drainArea <= 0.0) && (here->BSIM3drainPerimeter <= 0.0)) { DrainSatCurrent = 1.0e-14; } else { DrainSatCurrent = here->BSIM3drainArea * model->BSIM3jctTempSatCurDensity + here->BSIM3drainPerimeter * model->BSIM3jctSidewallTempSatCurDensity; } } else { error = ACM_saturationCurrents( model->BSIM3acmMod, model->BSIM3calcacm, here->BSIM3geo, model->BSIM3hdif, model->BSIM3wmlt, here->BSIM3w, model->BSIM3xw, model->BSIM3jctTempSatCurDensity, model->BSIM3jctSidewallTempSatCurDensity, here->BSIM3drainAreaGiven, here->BSIM3drainArea, here->BSIM3drainPerimeterGiven, here->BSIM3drainPerimeter, here->BSIM3sourceAreaGiven, here->BSIM3sourceArea, here->BSIM3sourcePerimeterGiven, here->BSIM3sourcePerimeter, &DrainSatCurrent, &SourceSatCurrent ); if (error) return(error); } if (SourceSatCurrent <= 0.0) { here->BSIM3gbs = ckt->CKTgmin; here->BSIM3cbs = here->BSIM3gbs * vbs; } else { if (model->BSIM3ijth == 0.0) { evbs = exp(vbs / Nvtm); here->BSIM3gbs = SourceSatCurrent * evbs / Nvtm + ckt->CKTgmin; here->BSIM3cbs = SourceSatCurrent * (evbs - 1.0) + ckt->CKTgmin * vbs; } else { if (vbs < here->BSIM3vjsm) { evbs = exp(vbs / Nvtm); here->BSIM3gbs = SourceSatCurrent * evbs / Nvtm + ckt->CKTgmin; here->BSIM3cbs = SourceSatCurrent * (evbs - 1.0) + ckt->CKTgmin * vbs; } else { T0 = here->BSIM3IsEvjsm / Nvtm; here->BSIM3gbs = T0 + ckt->CKTgmin; here->BSIM3cbs = here->BSIM3IsEvjsm - SourceSatCurrent + T0 * (vbs - here->BSIM3vjsm) + ckt->CKTgmin * vbs; } } } if (DrainSatCurrent <= 0.0) { here->BSIM3gbd = ckt->CKTgmin; here->BSIM3cbd = here->BSIM3gbd * vbd; } else { if (model->BSIM3ijth == 0.0) { evbd = exp(vbd / Nvtm); 
here->BSIM3gbd = DrainSatCurrent * evbd / Nvtm + ckt->CKTgmin; here->BSIM3cbd = DrainSatCurrent * (evbd - 1.0) + ckt->CKTgmin * vbd; } else { if (vbd < here->BSIM3vjdm) { evbd = exp(vbd / Nvtm); here->BSIM3gbd = DrainSatCurrent * evbd / Nvtm + ckt->CKTgmin; here->BSIM3cbd = DrainSatCurrent * (evbd - 1.0) + ckt->CKTgmin * vbd; } else { T0 = here->BSIM3IsEvjdm / Nvtm; here->BSIM3gbd = T0 + ckt->CKTgmin; here->BSIM3cbd = here->BSIM3IsEvjdm - DrainSatCurrent + T0 * (vbd - here->BSIM3vjdm) + ckt->CKTgmin * vbd; } } } /* End of diode DC model */ if (vds >= 0.0) { /* normal mode */ here->BSIM3mode = 1; Vds = vds; Vgs = vgs; Vbs = vbs; } else { /* inverse mode */ here->BSIM3mode = -1; Vds = -vds; Vgs = vgd; Vbs = vbd; } T0 = Vbs - pParam->BSIM3vbsc - 0.001; T1 = sqrt(T0 * T0 - 0.004 * pParam->BSIM3vbsc); Vbseff = pParam->BSIM3vbsc + 0.5 * (T0 + T1); dVbseff_dVb = 0.5 * (1.0 + T0 / T1); if (Vbseff < Vbs) { Vbseff = Vbs; } if (Vbseff > 0.0) { T0 = pParam->BSIM3phi / (pParam->BSIM3phi + Vbseff); Phis = pParam->BSIM3phi * T0; dPhis_dVb = -T0 * T0; sqrtPhis = pParam->BSIM3phis3 / (pParam->BSIM3phi + 0.5 * Vbseff); dsqrtPhis_dVb = -0.5 * sqrtPhis * sqrtPhis / pParam->BSIM3phis3; } else { Phis = pParam->BSIM3phi - Vbseff; dPhis_dVb = -1.0; sqrtPhis = sqrt(Phis); dsqrtPhis_dVb = -0.5 / sqrtPhis; } Xdep = pParam->BSIM3Xdep0 * sqrtPhis / pParam->BSIM3sqrtPhi; dXdep_dVb = (pParam->BSIM3Xdep0 / pParam->BSIM3sqrtPhi) * dsqrtPhis_dVb; Leff = pParam->BSIM3leff; Vtm = model->BSIM3vtm; /* Vth Calculation */ T3 = sqrt(Xdep); V0 = pParam->BSIM3vbi - pParam->BSIM3phi; T0 = pParam->BSIM3dvt2 * Vbseff; if (T0 >= - 0.5) { T1 = 1.0 + T0; T2 = pParam->BSIM3dvt2; } else /* Added to avoid any discontinuity problems caused by dvt2 */ { T4 = 1.0 / (3.0 + 8.0 * T0); T1 = (1.0 + 3.0 * T0) * T4; T2 = pParam->BSIM3dvt2 * T4 * T4; } lt1 = model->BSIM3factor1 * T3 * T1; dlt1_dVb = model->BSIM3factor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2); T0 = pParam->BSIM3dvt2w * Vbseff; if (T0 >= - 0.5) { T1 = 1.0 + T0; 
T2 = pParam->BSIM3dvt2w; } else /* Added to avoid any discontinuity problems caused by dvt2w */ { T4 = 1.0 / (3.0 + 8.0 * T0); T1 = (1.0 + 3.0 * T0) * T4; T2 = pParam->BSIM3dvt2w * T4 * T4; } ltw = model->BSIM3factor1 * T3 * T1; dltw_dVb = model->BSIM3factor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2); T0 = -0.5 * pParam->BSIM3dvt1 * Leff / lt1; if (T0 > -EXP_THRESHOLD) { T1 = exp(T0); Theta0 = T1 * (1.0 + 2.0 * T1); dT1_dVb = -T0 / lt1 * T1 * dlt1_dVb; dTheta0_dVb = (1.0 + 4.0 * T1) * dT1_dVb; } else { T1 = MIN_EXP; Theta0 = T1 * (1.0 + 2.0 * T1); dTheta0_dVb = 0.0; } here->BSIM3thetavth = pParam->BSIM3dvt0 * Theta0; Delt_vth = here->BSIM3thetavth * V0; dDelt_vth_dVb = pParam->BSIM3dvt0 * dTheta0_dVb * V0; T0 = -0.5 * pParam->BSIM3dvt1w * pParam->BSIM3weff * Leff / ltw; if (T0 > -EXP_THRESHOLD) { T1 = exp(T0); T2 = T1 * (1.0 + 2.0 * T1); dT1_dVb = -T0 / ltw * T1 * dltw_dVb; dT2_dVb = (1.0 + 4.0 * T1) * dT1_dVb; } else { T1 = MIN_EXP; T2 = T1 * (1.0 + 2.0 * T1); dT2_dVb = 0.0; } T0 = pParam->BSIM3dvt0w * T2; T2 = T0 * V0; dT2_dVb = pParam->BSIM3dvt0w * dT2_dVb * V0; TempRatio = ckt->CKTtemp / model->BSIM3tnom - 1.0; T0 = sqrt(1.0 + pParam->BSIM3nlx / Leff); T1 = pParam->BSIM3k1ox * (T0 - 1.0) * pParam->BSIM3sqrtPhi + (pParam->BSIM3kt1 + pParam->BSIM3kt1l / Leff + pParam->BSIM3kt2 * Vbseff) * TempRatio; tmp2 = model->BSIM3tox * pParam->BSIM3phi / (pParam->BSIM3weff + pParam->BSIM3w0); T3 = pParam->BSIM3eta0 + pParam->BSIM3etab * Vbseff; if (T3 < 1.0e-4) /* avoid discontinuity problems caused by etab */ { T9 = 1.0 / (3.0 - 2.0e4 * T3); T3 = (2.0e-4 - T3) * T9; T4 = T9 * T9; } else { T4 = 1.0; } dDIBL_Sft_dVd = T3 * pParam->BSIM3theta0vb0; DIBL_Sft = dDIBL_Sft_dVd * Vds; Vth = model->BSIM3type * here->BSIM3vth0 - pParam->BSIM3k1 * pParam->BSIM3sqrtPhi + pParam->BSIM3k1ox * sqrtPhis - pParam->BSIM3k2ox * Vbseff - Delt_vth - T2 + (pParam->BSIM3k3 + pParam->BSIM3k3b * Vbseff) * tmp2 + T1 - DIBL_Sft; here->BSIM3von = Vth; dVth_dVb = pParam->BSIM3k1ox * dsqrtPhis_dVb - 
pParam->BSIM3k2ox - dDelt_vth_dVb - dT2_dVb + pParam->BSIM3k3b * tmp2 - pParam->BSIM3etab * Vds * pParam->BSIM3theta0vb0 * T4 + pParam->BSIM3kt2 * TempRatio; dVth_dVd = -dDIBL_Sft_dVd; /* Calculate n */ tmp2 = pParam->BSIM3nfactor * EPSSI / Xdep; tmp3 = pParam->BSIM3cdsc + pParam->BSIM3cdscb * Vbseff + pParam->BSIM3cdscd * Vds; tmp4 = (tmp2 + tmp3 * Theta0 + pParam->BSIM3cit) / model->BSIM3cox; if (tmp4 >= -0.5) { n = 1.0 + tmp4; dn_dVb = (-tmp2 / Xdep * dXdep_dVb + tmp3 * dTheta0_dVb + pParam->BSIM3cdscb * Theta0) / model->BSIM3cox; dn_dVd = pParam->BSIM3cdscd * Theta0 / model->BSIM3cox; } else /* avoid discontinuity problems caused by tmp4 */ { T0 = 1.0 / (3.0 + 8.0 * tmp4); n = (1.0 + 3.0 * tmp4) * T0; T0 *= T0; dn_dVb = (-tmp2 / Xdep * dXdep_dVb + tmp3 * dTheta0_dVb + pParam->BSIM3cdscb * Theta0) / model->BSIM3cox * T0; dn_dVd = pParam->BSIM3cdscd * Theta0 / model->BSIM3cox * T0; } /* Poly Gate Si Depletion Effect */ T0 = here->BSIM3vfb + pParam->BSIM3phi; if ((pParam->BSIM3ngate > 1.e18) && (pParam->BSIM3ngate < 1.e25) && (Vgs > T0)) /* added to avoid the problem caused by ngate */ { T1 = 1.0e6 * Charge_q * EPSSI * pParam->BSIM3ngate / (model->BSIM3cox * model->BSIM3cox); T4 = sqrt(1.0 + 2.0 * (Vgs - T0) / T1); T2 = T1 * (T4 - 1.0); T3 = 0.5 * T2 * T2 / T1; /* T3 = Vpoly */ T7 = 1.12 - T3 - 0.05; T6 = sqrt(T7 * T7 + 0.224); T5 = 1.12 - 0.5 * (T7 + T6); Vgs_eff = Vgs - T5; dVgs_eff_dVg = 1.0 - (0.5 - 0.5 / T4) * (1.0 + T7 / T6); } else { Vgs_eff = Vgs; dVgs_eff_dVg = 1.0; } Vgst = Vgs_eff - Vth; /* Effective Vgst (Vgsteff) Calculation */ T10 = 2.0 * n * Vtm; VgstNVt = Vgst / T10; ExpArg = (2.0 * pParam->BSIM3voff - Vgst) / T10; /* MCJ: Very small Vgst */ if (VgstNVt > EXP_THRESHOLD) { Vgsteff = Vgst; dVgsteff_dVg = dVgs_eff_dVg; dVgsteff_dVd = -dVth_dVd; dVgsteff_dVb = -dVth_dVb; } else if (ExpArg > EXP_THRESHOLD) { T0 = (Vgst - pParam->BSIM3voff) / (n * Vtm); ExpVgst = exp(T0); Vgsteff = Vtm * pParam->BSIM3cdep0 / model->BSIM3cox * ExpVgst; dVgsteff_dVg = 
Vgsteff / (n * Vtm); dVgsteff_dVd = -dVgsteff_dVg * (dVth_dVd + T0 * Vtm * dn_dVd); dVgsteff_dVb = -dVgsteff_dVg * (dVth_dVb + T0 * Vtm * dn_dVb); dVgsteff_dVg *= dVgs_eff_dVg; } else { ExpVgst = exp(VgstNVt); T1 = T10 * log(1.0 + ExpVgst); dT1_dVg = ExpVgst / (1.0 + ExpVgst); dT1_dVb = -dT1_dVg * (dVth_dVb + Vgst / n * dn_dVb) + T1 / n * dn_dVb; dT1_dVd = -dT1_dVg * (dVth_dVd + Vgst / n * dn_dVd) + T1 / n * dn_dVd; dT2_dVg = -model->BSIM3cox / (Vtm * pParam->BSIM3cdep0) * exp(ExpArg); T2 = 1.0 - T10 * dT2_dVg; dT2_dVd = -dT2_dVg * (dVth_dVd - 2.0 * Vtm * ExpArg * dn_dVd) + (T2 - 1.0) / n * dn_dVd; dT2_dVb = -dT2_dVg * (dVth_dVb - 2.0 * Vtm * ExpArg * dn_dVb) + (T2 - 1.0) / n * dn_dVb; Vgsteff = T1 / T2; T3 = T2 * T2; dVgsteff_dVg = (T2 * dT1_dVg - T1 * dT2_dVg) / T3 * dVgs_eff_dVg; dVgsteff_dVd = (T2 * dT1_dVd - T1 * dT2_dVd) / T3; dVgsteff_dVb = (T2 * dT1_dVb - T1 * dT2_dVb) / T3; } here->BSIM3Vgsteff = Vgsteff; /* Calculate Effective Channel Geometry */ T9 = sqrtPhis - pParam->BSIM3sqrtPhi; Weff = pParam->BSIM3weff - 2.0 * (pParam->BSIM3dwg * Vgsteff + pParam->BSIM3dwb * T9); dWeff_dVg = -2.0 * pParam->BSIM3dwg; dWeff_dVb = -2.0 * pParam->BSIM3dwb * dsqrtPhis_dVb; if (Weff < 2.0e-8) /* to avoid the discontinuity problem due to Weff*/ { T0 = 1.0 / (6.0e-8 - 2.0 * Weff); Weff = 2.0e-8 * (4.0e-8 - Weff) * T0; T0 *= T0 * 4.0e-16; dWeff_dVg *= T0; dWeff_dVb *= T0; } T0 = pParam->BSIM3prwg * Vgsteff + pParam->BSIM3prwb * T9; if (T0 >= -0.9) { Rds = pParam->BSIM3rds0 * (1.0 + T0); dRds_dVg = pParam->BSIM3rds0 * pParam->BSIM3prwg; dRds_dVb = pParam->BSIM3rds0 * pParam->BSIM3prwb * dsqrtPhis_dVb; } else /* to avoid the discontinuity problem due to prwg and prwb*/ { T1 = 1.0 / (17.0 + 20.0 * T0); Rds = pParam->BSIM3rds0 * (0.8 + T0) * T1; T1 *= T1; dRds_dVg = pParam->BSIM3rds0 * pParam->BSIM3prwg * T1; dRds_dVb = pParam->BSIM3rds0 * pParam->BSIM3prwb * dsqrtPhis_dVb * T1; } here->BSIM3rds = Rds; /* Noise Bugfix */ /* Calculate Abulk */ T1 = 0.5 * pParam->BSIM3k1ox / 
sqrtPhis; dT1_dVb = -T1 / sqrtPhis * dsqrtPhis_dVb; T9 = sqrt(pParam->BSIM3xj * Xdep); tmp1 = Leff + 2.0 * T9; T5 = Leff / tmp1; tmp2 = pParam->BSIM3a0 * T5; tmp3 = pParam->BSIM3weff + pParam->BSIM3b1; tmp4 = pParam->BSIM3b0 / tmp3; T2 = tmp2 + tmp4; dT2_dVb = -T9 / tmp1 / Xdep * dXdep_dVb; T6 = T5 * T5; T7 = T5 * T6; Abulk0 = 1.0 + T1 * T2; dAbulk0_dVb = T1 * tmp2 * dT2_dVb + T2 * dT1_dVb; T8 = pParam->BSIM3ags * pParam->BSIM3a0 * T7; dAbulk_dVg = -T1 * T8; Abulk = Abulk0 + dAbulk_dVg * Vgsteff; dAbulk_dVb = dAbulk0_dVb - T8 * Vgsteff * (dT1_dVb + 3.0 * T1 * dT2_dVb); if (Abulk0 < 0.1) /* added to avoid the problems caused by Abulk0 */ { T9 = 1.0 / (3.0 - 20.0 * Abulk0); Abulk0 = (0.2 - Abulk0) * T9; dAbulk0_dVb *= T9 * T9; } if (Abulk < 0.1) /* added to avoid the problems caused by Abulk */ { T9 = 1.0 / (3.0 - 20.0 * Abulk); Abulk = (0.2 - Abulk) * T9; T10 = T9 * T9; dAbulk_dVb *= T10; dAbulk_dVg *= T10; } here->BSIM3Abulk = Abulk; T2 = pParam->BSIM3keta * Vbseff; if (T2 >= -0.9) { T0 = 1.0 / (1.0 + T2); dT0_dVb = -pParam->BSIM3keta * T0 * T0; } else /* added to avoid the problems caused by Keta */ { T1 = 1.0 / (0.8 + T2); T0 = (17.0 + 20.0 * T2) * T1; dT0_dVb = -pParam->BSIM3keta * T1 * T1; } dAbulk_dVg *= T0; dAbulk_dVb = dAbulk_dVb * T0 + Abulk * dT0_dVb; dAbulk0_dVb = dAbulk0_dVb * T0 + Abulk0 * dT0_dVb; Abulk *= T0; Abulk0 *= T0; /* Mobility calculation */ if (model->BSIM3mobMod == 1) { T0 = Vgsteff + Vth + Vth; T2 = pParam->BSIM3ua + pParam->BSIM3uc * Vbseff; T3 = T0 / model->BSIM3tox; T5 = T3 * (T2 + pParam->BSIM3ub * T3); dDenomi_dVg = (T2 + 2.0 * pParam->BSIM3ub * T3) / model->BSIM3tox; dDenomi_dVd = dDenomi_dVg * 2.0 * dVth_dVd; dDenomi_dVb = dDenomi_dVg * 2.0 * dVth_dVb + pParam->BSIM3uc * T3; } else if (model->BSIM3mobMod == 2) { T5 = Vgsteff / model->BSIM3tox * (pParam->BSIM3ua + pParam->BSIM3uc * Vbseff + pParam->BSIM3ub * Vgsteff / model->BSIM3tox); dDenomi_dVg = (pParam->BSIM3ua + pParam->BSIM3uc * Vbseff + 2.0 * pParam->BSIM3ub * Vgsteff / 
model->BSIM3tox) / model->BSIM3tox; dDenomi_dVd = 0.0; dDenomi_dVb = Vgsteff * pParam->BSIM3uc / model->BSIM3tox; } else { T0 = Vgsteff + Vth + Vth; T2 = 1.0 + pParam->BSIM3uc * Vbseff; T3 = T0 / model->BSIM3tox; T4 = T3 * (pParam->BSIM3ua + pParam->BSIM3ub * T3); T5 = T4 * T2; dDenomi_dVg = (pParam->BSIM3ua + 2.0 * pParam->BSIM3ub * T3) * T2 / model->BSIM3tox; dDenomi_dVd = dDenomi_dVg * 2.0 * dVth_dVd; dDenomi_dVb = dDenomi_dVg * 2.0 * dVth_dVb + pParam->BSIM3uc * T4; } if (T5 >= -0.8) { Denomi = 1.0 + T5; } else /* Added to avoid the discontinuity problem caused by ua and ub*/ { T9 = 1.0 / (7.0 + 10.0 * T5); Denomi = (0.6 + T5) * T9; T9 *= T9; dDenomi_dVg *= T9; dDenomi_dVd *= T9; dDenomi_dVb *= T9; } here->BSIM3ueff = ueff = here->BSIM3u0temp / Denomi; T9 = -ueff / Denomi; dueff_dVg = T9 * dDenomi_dVg; dueff_dVd = T9 * dDenomi_dVd; dueff_dVb = T9 * dDenomi_dVb; /* Saturation Drain Voltage Vdsat */ WVCox = Weff * pParam->BSIM3vsattemp * model->BSIM3cox; WVCoxRds = WVCox * Rds; Esat = 2.0 * pParam->BSIM3vsattemp / ueff; EsatL = Esat * Leff; T0 = -EsatL /ueff; dEsatL_dVg = T0 * dueff_dVg; dEsatL_dVd = T0 * dueff_dVd; dEsatL_dVb = T0 * dueff_dVb; /* Sqrt() */ a1 = pParam->BSIM3a1; if (a1 == 0.0) { Lambda = pParam->BSIM3a2; dLambda_dVg = 0.0; } else if (a1 > 0.0) /* Added to avoid the discontinuity problem caused by a1 and a2 (Lambda) */ { T0 = 1.0 - pParam->BSIM3a2; T1 = T0 - pParam->BSIM3a1 * Vgsteff - 0.0001; T2 = sqrt(T1 * T1 + 0.0004 * T0); Lambda = pParam->BSIM3a2 + T0 - 0.5 * (T1 + T2); dLambda_dVg = 0.5 * pParam->BSIM3a1 * (1.0 + T1 / T2); } else { T1 = pParam->BSIM3a2 + pParam->BSIM3a1 * Vgsteff - 0.0001; T2 = sqrt(T1 * T1 + 0.0004 * pParam->BSIM3a2); Lambda = 0.5 * (T1 + T2); dLambda_dVg = 0.5 * pParam->BSIM3a1 * (1.0 + T1 / T2); } Vgst2Vtm = Vgsteff + 2.0 * Vtm; here->BSIM3AbovVgst2Vtm = Abulk / Vgst2Vtm; if (Rds > 0) { tmp2 = dRds_dVg / Rds + dWeff_dVg / Weff; tmp3 = dRds_dVb / Rds + dWeff_dVb / Weff; } else { tmp2 = dWeff_dVg / Weff; tmp3 = dWeff_dVb / 
Weff; } if ((Rds == 0.0) && (Lambda == 1.0)) { T0 = 1.0 / (Abulk * EsatL + Vgst2Vtm); tmp1 = 0.0; T1 = T0 * T0; T2 = Vgst2Vtm * T0; T3 = EsatL * Vgst2Vtm; Vdsat = T3 * T0; dT0_dVg = -(Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + 1.0) * T1; dT0_dVd = -(Abulk * dEsatL_dVd) * T1; dT0_dVb = -(Abulk * dEsatL_dVb + dAbulk_dVb * EsatL) * T1; dVdsat_dVg = T3 * dT0_dVg + T2 * dEsatL_dVg + EsatL * T0; dVdsat_dVd = T3 * dT0_dVd + T2 * dEsatL_dVd; dVdsat_dVb = T3 * dT0_dVb + T2 * dEsatL_dVb; } else { tmp1 = dLambda_dVg / (Lambda * Lambda); T9 = Abulk * WVCoxRds; T8 = Abulk * T9; T7 = Vgst2Vtm * T9; T6 = Vgst2Vtm * WVCoxRds; T0 = 2.0 * Abulk * (T9 - 1.0 + 1.0 / Lambda); dT0_dVg = 2.0 * (T8 * tmp2 - Abulk * tmp1 + (2.0 * T9 + 1.0 / Lambda - 1.0) * dAbulk_dVg); dT0_dVb = 2.0 * (T8 * (2.0 / Abulk * dAbulk_dVb + tmp3) + (1.0 / Lambda - 1.0) * dAbulk_dVb); dT0_dVd = 0.0; T1 = Vgst2Vtm * (2.0 / Lambda - 1.0) + Abulk * EsatL + 3.0 * T7; dT1_dVg = (2.0 / Lambda - 1.0) - 2.0 * Vgst2Vtm * tmp1 + Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + 3.0 * (T9 + T7 * tmp2 + T6 * dAbulk_dVg); dT1_dVb = Abulk * dEsatL_dVb + EsatL * dAbulk_dVb + 3.0 * (T6 * dAbulk_dVb + T7 * tmp3); dT1_dVd = Abulk * dEsatL_dVd; T2 = Vgst2Vtm * (EsatL + 2.0 * T6); dT2_dVg = EsatL + Vgst2Vtm * dEsatL_dVg + T6 * (4.0 + 2.0 * Vgst2Vtm * tmp2); dT2_dVb = Vgst2Vtm * (dEsatL_dVb + 2.0 * T6 * tmp3); dT2_dVd = Vgst2Vtm * dEsatL_dVd; T3 = sqrt(T1 * T1 - 2.0 * T0 * T2); Vdsat = (T1 - T3) / T0; dT3_dVg = (T1 * dT1_dVg - 2.0 * (T0 * dT2_dVg + T2 * dT0_dVg)) / T3; dT3_dVd = (T1 * dT1_dVd - 2.0 * (T0 * dT2_dVd + T2 * dT0_dVd)) / T3; dT3_dVb = (T1 * dT1_dVb - 2.0 * (T0 * dT2_dVb + T2 * dT0_dVb)) / T3; dVdsat_dVg = (dT1_dVg - (T1 * dT1_dVg - dT0_dVg * T2 - T0 * dT2_dVg) / T3 - Vdsat * dT0_dVg) / T0; dVdsat_dVb = (dT1_dVb - (T1 * dT1_dVb - dT0_dVb * T2 - T0 * dT2_dVb) / T3 - Vdsat * dT0_dVb) / T0; dVdsat_dVd = (dT1_dVd - (T1 * dT1_dVd - T0 * dT2_dVd) / T3) / T0; } here->BSIM3vdsat = Vdsat; /* Effective Vds (Vdseff) Calculation */ T1 = Vdsat 
- Vds - pParam->BSIM3delta; dT1_dVg = dVdsat_dVg; dT1_dVd = dVdsat_dVd - 1.0; dT1_dVb = dVdsat_dVb; T2 = sqrt(T1 * T1 + 4.0 * pParam->BSIM3delta * Vdsat); T0 = T1 / T2; T3 = 2.0 * pParam->BSIM3delta / T2; dT2_dVg = T0 * dT1_dVg + T3 * dVdsat_dVg; dT2_dVd = T0 * dT1_dVd + T3 * dVdsat_dVd; dT2_dVb = T0 * dT1_dVb + T3 * dVdsat_dVb; Vdseff = Vdsat - 0.5 * (T1 + T2); dVdseff_dVg = dVdsat_dVg - 0.5 * (dT1_dVg + dT2_dVg); dVdseff_dVd = dVdsat_dVd - 0.5 * (dT1_dVd + dT2_dVd); dVdseff_dVb = dVdsat_dVb - 0.5 * (dT1_dVb + dT2_dVb); /* Added to eliminate non-zero Vdseff at Vds=0.0 */ if (Vds == 0.0) { Vdseff = 0.0; dVdseff_dVg = 0.0; dVdseff_dVb = 0.0; } /* Calculate VAsat */ tmp4 = 1.0 - 0.5 * Abulk * Vdsat / Vgst2Vtm; T9 = WVCoxRds * Vgsteff; T8 = T9 / Vgst2Vtm; T0 = EsatL + Vdsat + 2.0 * T9 * tmp4; T7 = 2.0 * WVCoxRds * tmp4; dT0_dVg = dEsatL_dVg + dVdsat_dVg + T7 * (1.0 + tmp2 * Vgsteff) - T8 * (Abulk * dVdsat_dVg - Abulk * Vdsat / Vgst2Vtm + Vdsat * dAbulk_dVg); dT0_dVb = dEsatL_dVb + dVdsat_dVb + T7 * tmp3 * Vgsteff - T8 * (dAbulk_dVb * Vdsat + Abulk * dVdsat_dVb); dT0_dVd = dEsatL_dVd + dVdsat_dVd - T8 * Abulk * dVdsat_dVd; T9 = WVCoxRds * Abulk; T1 = 2.0 / Lambda - 1.0 + T9; dT1_dVg = -2.0 * tmp1 + WVCoxRds * (Abulk * tmp2 + dAbulk_dVg); dT1_dVb = dAbulk_dVb * WVCoxRds + T9 * tmp3; Vasat = T0 / T1; dVasat_dVg = (dT0_dVg - Vasat * dT1_dVg) / T1; dVasat_dVb = (dT0_dVb - Vasat * dT1_dVb) / T1; dVasat_dVd = dT0_dVd / T1; if (Vdseff > Vds) Vdseff = Vds; diffVds = Vds - Vdseff; here->BSIM3Vdseff = Vdseff; /* Calculate VACLM */ if ((pParam->BSIM3pclm > 0.0) && (diffVds > 1.0e-10)) { T0 = 1.0 / (pParam->BSIM3pclm * Abulk * pParam->BSIM3litl); dT0_dVb = -T0 / Abulk * dAbulk_dVb; dT0_dVg = -T0 / Abulk * dAbulk_dVg; T2 = Vgsteff / EsatL; T1 = Leff * (Abulk + T2); dT1_dVg = Leff * ((1.0 - T2 * dEsatL_dVg) / EsatL + dAbulk_dVg); dT1_dVb = Leff * (dAbulk_dVb - T2 * dEsatL_dVb / EsatL); dT1_dVd = -T2 * dEsatL_dVd / Esat; T9 = T0 * T1; VACLM = T9 * diffVds; dVACLM_dVg = T0 * dT1_dVg * 
diffVds - T9 * dVdseff_dVg + T1 * diffVds * dT0_dVg; dVACLM_dVb = (dT0_dVb * T1 + T0 * dT1_dVb) * diffVds - T9 * dVdseff_dVb; dVACLM_dVd = T0 * dT1_dVd * diffVds + T9 * (1.0 - dVdseff_dVd); } else { VACLM = MAX_EXP; dVACLM_dVd = dVACLM_dVg = dVACLM_dVb = 0.0; } /* Calculate VADIBL */ if (pParam->BSIM3thetaRout > 0.0) { T8 = Abulk * Vdsat; T0 = Vgst2Vtm * T8; dT0_dVg = Vgst2Vtm * Abulk * dVdsat_dVg + T8 + Vgst2Vtm * Vdsat * dAbulk_dVg; dT0_dVb = Vgst2Vtm * (dAbulk_dVb * Vdsat + Abulk * dVdsat_dVb); dT0_dVd = Vgst2Vtm * Abulk * dVdsat_dVd; T1 = Vgst2Vtm + T8; dT1_dVg = 1.0 + Abulk * dVdsat_dVg + Vdsat * dAbulk_dVg; dT1_dVb = Abulk * dVdsat_dVb + dAbulk_dVb * Vdsat; dT1_dVd = Abulk * dVdsat_dVd; T9 = T1 * T1; T2 = pParam->BSIM3thetaRout; VADIBL = (Vgst2Vtm - T0 / T1) / T2; dVADIBL_dVg = (1.0 - dT0_dVg / T1 + T0 * dT1_dVg / T9) / T2; dVADIBL_dVb = (-dT0_dVb / T1 + T0 * dT1_dVb / T9) / T2; dVADIBL_dVd = (-dT0_dVd / T1 + T0 * dT1_dVd / T9) / T2; T7 = pParam->BSIM3pdiblb * Vbseff; if (T7 >= -0.9) { T3 = 1.0 / (1.0 + T7); VADIBL *= T3; dVADIBL_dVg *= T3; dVADIBL_dVb = (dVADIBL_dVb - VADIBL * pParam->BSIM3pdiblb) * T3; dVADIBL_dVd *= T3; } else /* Added to avoid the discontinuity problem caused by pdiblcb */ { T4 = 1.0 / (0.8 + T7); T3 = (17.0 + 20.0 * T7) * T4; dVADIBL_dVg *= T3; dVADIBL_dVb = dVADIBL_dVb * T3 - VADIBL * pParam->BSIM3pdiblb * T4 * T4; dVADIBL_dVd *= T3; VADIBL *= T3; } } else { VADIBL = MAX_EXP; dVADIBL_dVd = dVADIBL_dVg = dVADIBL_dVb = 0.0; } /* Calculate VA */ T8 = pParam->BSIM3pvag / EsatL; T9 = T8 * Vgsteff; if (T9 > -0.9) { T0 = 1.0 + T9; dT0_dVg = T8 * (1.0 - Vgsteff * dEsatL_dVg / EsatL); dT0_dVb = -T9 * dEsatL_dVb / EsatL; dT0_dVd = -T9 * dEsatL_dVd / EsatL; } else /* Added to avoid the discontinuity problems caused by pvag */ { T1 = 1.0 / (17.0 + 20.0 * T9); T0 = (0.8 + T9) * T1; T1 *= T1; dT0_dVg = T8 * (1.0 - Vgsteff * dEsatL_dVg / EsatL) * T1; T9 *= T1 / EsatL; dT0_dVb = -T9 * dEsatL_dVb; dT0_dVd = -T9 * dEsatL_dVd; } tmp1 = VACLM * VACLM; tmp2 
= VADIBL * VADIBL; tmp3 = VACLM + VADIBL; T1 = VACLM * VADIBL / tmp3; tmp3 *= tmp3; dT1_dVg = (tmp1 * dVADIBL_dVg + tmp2 * dVACLM_dVg) / tmp3; dT1_dVd = (tmp1 * dVADIBL_dVd + tmp2 * dVACLM_dVd) / tmp3; dT1_dVb = (tmp1 * dVADIBL_dVb + tmp2 * dVACLM_dVb) / tmp3; Va = Vasat + T0 * T1; dVa_dVg = dVasat_dVg + T1 * dT0_dVg + T0 * dT1_dVg; dVa_dVd = dVasat_dVd + T1 * dT0_dVd + T0 * dT1_dVd; dVa_dVb = dVasat_dVb + T1 * dT0_dVb + T0 * dT1_dVb; /* Calculate VASCBE */ if (pParam->BSIM3pscbe2 > 0.0) { if (diffVds > pParam->BSIM3pscbe1 * pParam->BSIM3litl / EXP_THRESHOLD) { T0 = pParam->BSIM3pscbe1 * pParam->BSIM3litl / diffVds; VASCBE = Leff * exp(T0) / pParam->BSIM3pscbe2; T1 = T0 * VASCBE / diffVds; dVASCBE_dVg = T1 * dVdseff_dVg; dVASCBE_dVd = -T1 * (1.0 - dVdseff_dVd); dVASCBE_dVb = T1 * dVdseff_dVb; } else { VASCBE = MAX_EXP * Leff/pParam->BSIM3pscbe2; dVASCBE_dVg = dVASCBE_dVd = dVASCBE_dVb = 0.0; } } else { VASCBE = MAX_EXP; dVASCBE_dVg = dVASCBE_dVd = dVASCBE_dVb = 0.0; } /* Calculate Ids */ CoxWovL = model->BSIM3cox * Weff / Leff; beta = ueff * CoxWovL; dbeta_dVg = CoxWovL * dueff_dVg + beta * dWeff_dVg / Weff; dbeta_dVd = CoxWovL * dueff_dVd; dbeta_dVb = CoxWovL * dueff_dVb + beta * dWeff_dVb / Weff; T0 = 1.0 - 0.5 * Abulk * Vdseff / Vgst2Vtm; dT0_dVg = -0.5 * (Abulk * dVdseff_dVg - Abulk * Vdseff / Vgst2Vtm + Vdseff * dAbulk_dVg) / Vgst2Vtm; dT0_dVd = -0.5 * Abulk * dVdseff_dVd / Vgst2Vtm; dT0_dVb = -0.5 * (Abulk * dVdseff_dVb + dAbulk_dVb * Vdseff) / Vgst2Vtm; fgche1 = Vgsteff * T0; dfgche1_dVg = Vgsteff * dT0_dVg + T0; dfgche1_dVd = Vgsteff * dT0_dVd; dfgche1_dVb = Vgsteff * dT0_dVb; T9 = Vdseff / EsatL; fgche2 = 1.0 + T9; dfgche2_dVg = (dVdseff_dVg - T9 * dEsatL_dVg) / EsatL; dfgche2_dVd = (dVdseff_dVd - T9 * dEsatL_dVd) / EsatL; dfgche2_dVb = (dVdseff_dVb - T9 * dEsatL_dVb) / EsatL; gche = beta * fgche1 / fgche2; dgche_dVg = (beta * dfgche1_dVg + fgche1 * dbeta_dVg - gche * dfgche2_dVg) / fgche2; dgche_dVd = (beta * dfgche1_dVd + fgche1 * dbeta_dVd - gche * 
dfgche2_dVd) / fgche2; dgche_dVb = (beta * dfgche1_dVb + fgche1 * dbeta_dVb - gche * dfgche2_dVb) / fgche2; T0 = 1.0 + gche * Rds; T9 = Vdseff / T0; Idl = gche * T9; dIdl_dVg = (gche * dVdseff_dVg + T9 * dgche_dVg) / T0 - Idl * gche / T0 * dRds_dVg ; dIdl_dVd = (gche * dVdseff_dVd + T9 * dgche_dVd) / T0; dIdl_dVb = (gche * dVdseff_dVb + T9 * dgche_dVb - Idl * dRds_dVb * gche) / T0; T9 = diffVds / Va; T0 = 1.0 + T9; Idsa = Idl * T0; dIdsa_dVg = T0 * dIdl_dVg - Idl * (dVdseff_dVg + T9 * dVa_dVg) / Va; dIdsa_dVd = T0 * dIdl_dVd + Idl * (1.0 - dVdseff_dVd - T9 * dVa_dVd) / Va; dIdsa_dVb = T0 * dIdl_dVb - Idl * (dVdseff_dVb + T9 * dVa_dVb) / Va; T9 = diffVds / VASCBE; T0 = 1.0 + T9; Ids = Idsa * T0; Gm = T0 * dIdsa_dVg - Idsa * (dVdseff_dVg + T9 * dVASCBE_dVg) / VASCBE; Gds = T0 * dIdsa_dVd + Idsa * (1.0 - dVdseff_dVd - T9 * dVASCBE_dVd) / VASCBE; Gmb = T0 * dIdsa_dVb - Idsa * (dVdseff_dVb + T9 * dVASCBE_dVb) / VASCBE; Gds += Gm * dVgsteff_dVd; Gmb += Gm * dVgsteff_dVb; Gm *= dVgsteff_dVg; Gmb *= dVbseff_dVb; /* Substrate current begins */ tmp = pParam->BSIM3alpha0 + pParam->BSIM3alpha1 * Leff; if ((tmp <= 0.0) || (pParam->BSIM3beta0 <= 0.0)) { Isub = Gbd = Gbb = Gbg = 0.0; } else { T2 = tmp / Leff; if (diffVds > pParam->BSIM3beta0 / EXP_THRESHOLD) { T0 = -pParam->BSIM3beta0 / diffVds; T1 = T2 * diffVds * exp(T0); T3 = T1 / diffVds * (T0 - 1.0); dT1_dVg = T3 * dVdseff_dVg; dT1_dVd = T3 * (dVdseff_dVd - 1.0); dT1_dVb = T3 * dVdseff_dVb; } else { T3 = T2 * MIN_EXP; T1 = T3 * diffVds; dT1_dVg = -T3 * dVdseff_dVg; dT1_dVd = T3 * (1.0 - dVdseff_dVd); dT1_dVb = -T3 * dVdseff_dVb; } Isub = T1 * Idsa; Gbg = T1 * dIdsa_dVg + Idsa * dT1_dVg; Gbd = T1 * dIdsa_dVd + Idsa * dT1_dVd; Gbb = T1 * dIdsa_dVb + Idsa * dT1_dVb; Gbd += Gbg * dVgsteff_dVd; Gbb += Gbg * dVgsteff_dVb; Gbg *= dVgsteff_dVg; Gbb *= dVbseff_dVb; /* bug fixing */ } cdrain = Ids; here->BSIM3gds = Gds; here->BSIM3gm = Gm; here->BSIM3gmbs = Gmb; here->BSIM3gbbs = Gbb; here->BSIM3gbgs = Gbg; here->BSIM3gbds = Gbd; 
here->BSIM3csub = Isub; /* BSIM3 thermal noise Qinv calculated from all capMod * 0, 1, 2 & 3 stored in here->BSIM3qinv 1/1998 */ if ((model->BSIM3xpart < 0) || (!ChargeComputationNeeded)) { qgate = qdrn = qsrc = qbulk = 0.0; here->BSIM3cggb = here->BSIM3cgsb = here->BSIM3cgdb = 0.0; here->BSIM3cdgb = here->BSIM3cdsb = here->BSIM3cddb = 0.0; here->BSIM3cbgb = here->BSIM3cbsb = here->BSIM3cbdb = 0.0; here->BSIM3cqdb = here->BSIM3cqsb = here->BSIM3cqgb = here->BSIM3cqbb = 0.0; here->BSIM3gtau = 0.0; goto finished; } else if (model->BSIM3capMod == 0) { if (Vbseff < 0.0) { Vbseff = Vbs; dVbseff_dVb = 1.0; } else { Vbseff = pParam->BSIM3phi - Phis; dVbseff_dVb = -dPhis_dVb; } Vfb = pParam->BSIM3vfbcv; Vth = Vfb + pParam->BSIM3phi + pParam->BSIM3k1ox * sqrtPhis; Vgst = Vgs_eff - Vth; dVth_dVb = pParam->BSIM3k1ox * dsqrtPhis_dVb; dVgst_dVb = -dVth_dVb; dVgst_dVg = dVgs_eff_dVg; CoxWL = model->BSIM3cox * pParam->BSIM3weffCV * pParam->BSIM3leffCV; Arg1 = Vgs_eff - Vbseff - Vfb; if (Arg1 <= 0.0) { qgate = CoxWL * Arg1; qbulk = -qgate; qdrn = 0.0; here->BSIM3cggb = CoxWL * dVgs_eff_dVg; here->BSIM3cgdb = 0.0; here->BSIM3cgsb = CoxWL * (dVbseff_dVb - dVgs_eff_dVg); here->BSIM3cdgb = 0.0; here->BSIM3cddb = 0.0; here->BSIM3cdsb = 0.0; here->BSIM3cbgb = -CoxWL * dVgs_eff_dVg; here->BSIM3cbdb = 0.0; here->BSIM3cbsb = -here->BSIM3cgsb; here->BSIM3qinv = 0.0; } else if (Vgst <= 0.0) { T1 = 0.5 * pParam->BSIM3k1ox; T2 = sqrt(T1 * T1 + Arg1); qgate = CoxWL * pParam->BSIM3k1ox * (T2 - T1); qbulk = -qgate; qdrn = 0.0; T0 = CoxWL * T1 / T2; here->BSIM3cggb = T0 * dVgs_eff_dVg; here->BSIM3cgdb = 0.0; here->BSIM3cgsb = T0 * (dVbseff_dVb - dVgs_eff_dVg); here->BSIM3cdgb = 0.0; here->BSIM3cddb = 0.0; here->BSIM3cdsb = 0.0; here->BSIM3cbgb = -here->BSIM3cggb; here->BSIM3cbdb = 0.0; here->BSIM3cbsb = -here->BSIM3cgsb; here->BSIM3qinv = 0.0; } else { One_Third_CoxWL = CoxWL / 3.0; Two_Third_CoxWL = 2.0 * One_Third_CoxWL; AbulkCV = Abulk0 * pParam->BSIM3abulkCVfactor; dAbulkCV_dVb = 
pParam->BSIM3abulkCVfactor * dAbulk0_dVb; Vdsat = Vgst / AbulkCV; dVdsat_dVg = dVgs_eff_dVg / AbulkCV; dVdsat_dVb = - (Vdsat * dAbulkCV_dVb + dVth_dVb)/ AbulkCV; if (model->BSIM3xpart > 0.5) { /* 0/100 Charge partition model */ if (Vdsat <= Vds) { /* saturation region */ T1 = Vdsat / 3.0; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM3phi - T1); T2 = -Two_Third_CoxWL * Vgst; qbulk = -(qgate + T2); qdrn = 0.0; here->BSIM3cggb = One_Third_CoxWL * (3.0 - dVdsat_dVg) * dVgs_eff_dVg; T2 = -One_Third_CoxWL * dVdsat_dVb; here->BSIM3cgsb = -(here->BSIM3cggb + T2); here->BSIM3cgdb = 0.0; here->BSIM3cdgb = 0.0; here->BSIM3cddb = 0.0; here->BSIM3cdsb = 0.0; here->BSIM3cbgb = -(here->BSIM3cggb - Two_Third_CoxWL * dVgs_eff_dVg); T3 = -(T2 + Two_Third_CoxWL * dVth_dVb); here->BSIM3cbsb = -(here->BSIM3cbgb + T3); here->BSIM3cbdb = 0.0; here->BSIM3qinv = -(qgate + qbulk); } else { /* linear region */ Alphaz = Vgst / Vdsat; T1 = 2.0 * Vdsat - Vds; T2 = Vds / (3.0 * T1); T3 = T2 * Vds; T9 = 0.25 * CoxWL; T4 = T9 * Alphaz; T7 = 2.0 * Vds - T1 - 3.0 * T3; T8 = T3 - T1 - 2.0 * Vds; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM3phi - 0.5 * (Vds - T3)); T10 = T4 * T8; qdrn = T4 * T7; qbulk = -(qgate + qdrn + T10); T5 = T3 / T1; here->BSIM3cggb = CoxWL * (1.0 - T5 * dVdsat_dVg) * dVgs_eff_dVg; T11 = -CoxWL * T5 * dVdsat_dVb; here->BSIM3cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5); here->BSIM3cgsb = -(here->BSIM3cggb + T11 + here->BSIM3cgdb); T6 = 1.0 / Vdsat; dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg); dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb); T7 = T9 * T7; T8 = T9 * T8; T9 = 2.0 * T4 * (1.0 - 3.0 * T5); here->BSIM3cdgb = (T7 * dAlphaz_dVg - T9 * dVdsat_dVg) * dVgs_eff_dVg; T12 = T7 * dAlphaz_dVb - T9 * dVdsat_dVb; here->BSIM3cddb = T4 * (3.0 - 6.0 * T2 - 3.0 * T5); here->BSIM3cdsb = -(here->BSIM3cdgb + T12 + here->BSIM3cddb); T9 = 2.0 * T4 * (1.0 + T5); T10 = (T8 * dAlphaz_dVg - T9 * dVdsat_dVg) * dVgs_eff_dVg; T11 = T8 * dAlphaz_dVb - T9 * dVdsat_dVb; T12 = T4 * (2.0 * T2 + T5 - 
1.0); T0 = -(T10 + T11 + T12); here->BSIM3cbgb = -(here->BSIM3cggb + here->BSIM3cdgb + T10); here->BSIM3cbdb = -(here->BSIM3cgdb + here->BSIM3cddb + T12); here->BSIM3cbsb = -(here->BSIM3cgsb + here->BSIM3cdsb + T0); here->BSIM3qinv = -(qgate + qbulk); } } else if (model->BSIM3xpart < 0.5) { /* 40/60 Charge partition model */ if (Vds >= Vdsat) { /* saturation region */ T1 = Vdsat / 3.0; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM3phi - T1); T2 = -Two_Third_CoxWL * Vgst; qbulk = -(qgate + T2); qdrn = 0.4 * T2; here->BSIM3cggb = One_Third_CoxWL * (3.0 - dVdsat_dVg) * dVgs_eff_dVg; T2 = -One_Third_CoxWL * dVdsat_dVb; here->BSIM3cgsb = -(here->BSIM3cggb + T2); here->BSIM3cgdb = 0.0; T3 = 0.4 * Two_Third_CoxWL; here->BSIM3cdgb = -T3 * dVgs_eff_dVg; here->BSIM3cddb = 0.0; T4 = T3 * dVth_dVb; here->BSIM3cdsb = -(T4 + here->BSIM3cdgb); here->BSIM3cbgb = -(here->BSIM3cggb - Two_Third_CoxWL * dVgs_eff_dVg); T3 = -(T2 + Two_Third_CoxWL * dVth_dVb); here->BSIM3cbsb = -(here->BSIM3cbgb + T3); here->BSIM3cbdb = 0.0; here->BSIM3qinv = -(qgate + qbulk); } else { /* linear region */ Alphaz = Vgst / Vdsat; T1 = 2.0 * Vdsat - Vds; T2 = Vds / (3.0 * T1); T3 = T2 * Vds; T9 = 0.25 * CoxWL; T4 = T9 * Alphaz; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM3phi - 0.5 * (Vds - T3)); T5 = T3 / T1; here->BSIM3cggb = CoxWL * (1.0 - T5 * dVdsat_dVg) * dVgs_eff_dVg; tmp = -CoxWL * T5 * dVdsat_dVb; here->BSIM3cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5); here->BSIM3cgsb = -(here->BSIM3cggb + here->BSIM3cgdb + tmp); T6 = 1.0 / Vdsat; dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg); dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb); T6 = 8.0 * Vdsat * Vdsat - 6.0 * Vdsat * Vds + 1.2 * Vds * Vds; T8 = T2 / T1; T7 = Vds - T1 - T8 * T6; qdrn = T4 * T7; T7 *= T9; tmp = T8 / T1; tmp1 = T4 * (2.0 - 4.0 * tmp * T6 + T8 * (16.0 * Vdsat - 6.0 * Vds)); here->BSIM3cdgb = (T7 * dAlphaz_dVg - tmp1 * dVdsat_dVg) * dVgs_eff_dVg; T10 = T7 * dAlphaz_dVb - tmp1 * dVdsat_dVb; here->BSIM3cddb = T4 * (2.0 - (1.0 / (3.0 * T1 * 
T1) + 2.0 * tmp) * T6 + T8 * (6.0 * Vdsat - 2.4 * Vds)); here->BSIM3cdsb = -(here->BSIM3cdgb + T10 + here->BSIM3cddb); T7 = 2.0 * (T1 + T3); qbulk = -(qgate - T4 * T7); T7 *= T9; T0 = 4.0 * T4 * (1.0 - T5); T12 = (-T7 * dAlphaz_dVg - here->BSIM3cdgb - T0 * dVdsat_dVg) * dVgs_eff_dVg; T11 = -T7 * dAlphaz_dVb - T10 - T0 * dVdsat_dVb; T10 = -4.0 * T4 * (T2 - 0.5 + 0.5 * T5) - here->BSIM3cddb; tmp = -(T10 + T11 + T12); here->BSIM3cbgb = -(here->BSIM3cggb + here->BSIM3cdgb + T12); here->BSIM3cbdb = -(here->BSIM3cgdb + here->BSIM3cddb + T10); /* bug fix */ here->BSIM3cbsb = -(here->BSIM3cgsb + here->BSIM3cdsb + tmp); here->BSIM3qinv = -(qgate + qbulk); } } else { /* 50/50 partitioning */ if (Vds >= Vdsat) { /* saturation region */ T1 = Vdsat / 3.0; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM3phi - T1); T2 = -Two_Third_CoxWL * Vgst; qbulk = -(qgate + T2); qdrn = 0.5 * T2; here->BSIM3cggb = One_Third_CoxWL * (3.0 - dVdsat_dVg) * dVgs_eff_dVg; T2 = -One_Third_CoxWL * dVdsat_dVb; here->BSIM3cgsb = -(here->BSIM3cggb + T2); here->BSIM3cgdb = 0.0; here->BSIM3cdgb = -One_Third_CoxWL * dVgs_eff_dVg; here->BSIM3cddb = 0.0; T4 = One_Third_CoxWL * dVth_dVb; here->BSIM3cdsb = -(T4 + here->BSIM3cdgb); here->BSIM3cbgb = -(here->BSIM3cggb - Two_Third_CoxWL * dVgs_eff_dVg); T3 = -(T2 + Two_Third_CoxWL * dVth_dVb); here->BSIM3cbsb = -(here->BSIM3cbgb + T3); here->BSIM3cbdb = 0.0; here->BSIM3qinv = -(qgate + qbulk); } else { /* linear region */ Alphaz = Vgst / Vdsat; T1 = 2.0 * Vdsat - Vds; T2 = Vds / (3.0 * T1); T3 = T2 * Vds; T9 = 0.25 * CoxWL; T4 = T9 * Alphaz; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM3phi - 0.5 * (Vds - T3)); T5 = T3 / T1; here->BSIM3cggb = CoxWL * (1.0 - T5 * dVdsat_dVg) * dVgs_eff_dVg; tmp = -CoxWL * T5 * dVdsat_dVb; here->BSIM3cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5); here->BSIM3cgsb = -(here->BSIM3cggb + here->BSIM3cgdb + tmp); T6 = 1.0 / Vdsat; dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg); dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb); T7 = T1 + T3; 
qdrn = -T4 * T7; qbulk = - (qgate + qdrn + qdrn); T7 *= T9; T0 = T4 * (2.0 * T5 - 2.0); here->BSIM3cdgb = (T0 * dVdsat_dVg - T7 * dAlphaz_dVg) * dVgs_eff_dVg; T12 = T0 * dVdsat_dVb - T7 * dAlphaz_dVb; here->BSIM3cddb = T4 * (1.0 - 2.0 * T2 - T5); here->BSIM3cdsb = -(here->BSIM3cdgb + T12 + here->BSIM3cddb); here->BSIM3cbgb = -(here->BSIM3cggb + 2.0 * here->BSIM3cdgb); here->BSIM3cbdb = -(here->BSIM3cgdb + 2.0 * here->BSIM3cddb); here->BSIM3cbsb = -(here->BSIM3cgsb + 2.0 * here->BSIM3cdsb); here->BSIM3qinv = -(qgate + qbulk); } } } } else { if (Vbseff < 0.0) { VbseffCV = Vbseff; dVbseffCV_dVb = 1.0; } else { VbseffCV = pParam->BSIM3phi - Phis; dVbseffCV_dVb = -dPhis_dVb; } CoxWL = model->BSIM3cox * pParam->BSIM3weffCV * pParam->BSIM3leffCV; /* Seperate VgsteffCV with noff and voffcv */ noff = n * pParam->BSIM3noff; dnoff_dVd = pParam->BSIM3noff * dn_dVd; dnoff_dVb = pParam->BSIM3noff * dn_dVb; T0 = Vtm * noff; voffcv = pParam->BSIM3voffcv; VgstNVt = (Vgst - voffcv) / T0; if (VgstNVt > EXP_THRESHOLD) { Vgsteff = Vgst - voffcv; dVgsteff_dVg = dVgs_eff_dVg; dVgsteff_dVd = -dVth_dVd; dVgsteff_dVb = -dVth_dVb; } else if (VgstNVt < -EXP_THRESHOLD) { Vgsteff = T0 * log(1.0 + MIN_EXP); dVgsteff_dVg = 0.0; dVgsteff_dVd = Vgsteff / noff; dVgsteff_dVb = dVgsteff_dVd * dnoff_dVb; dVgsteff_dVd *= dnoff_dVd; } else { ExpVgst = exp(VgstNVt); Vgsteff = T0 * log(1.0 + ExpVgst); dVgsteff_dVg = ExpVgst / (1.0 + ExpVgst); dVgsteff_dVd = -dVgsteff_dVg * (dVth_dVd + (Vgst - voffcv) / noff * dnoff_dVd) + Vgsteff / noff * dnoff_dVd; dVgsteff_dVb = -dVgsteff_dVg * (dVth_dVb + (Vgst - voffcv) / noff * dnoff_dVb) + Vgsteff / noff * dnoff_dVb; dVgsteff_dVg *= dVgs_eff_dVg; } /* End of VgsteffCV */ if (model->BSIM3capMod == 1) { Vfb = here->BSIM3vfbzb; Arg1 = Vgs_eff - VbseffCV - Vfb - Vgsteff; if (Arg1 <= 0.0) { qgate = CoxWL * Arg1; Cgg = CoxWL * (dVgs_eff_dVg - dVgsteff_dVg); Cgd = -CoxWL * dVgsteff_dVd; Cgb = -CoxWL * (dVbseffCV_dVb + dVgsteff_dVb); } else { T0 = 0.5 * pParam->BSIM3k1ox; T1 
= sqrt(T0 * T0 + Arg1); T2 = CoxWL * T0 / T1; qgate = CoxWL * pParam->BSIM3k1ox * (T1 - T0); Cgg = T2 * (dVgs_eff_dVg - dVgsteff_dVg); Cgd = -T2 * dVgsteff_dVd; Cgb = -T2 * (dVbseffCV_dVb + dVgsteff_dVb); } qbulk = -qgate; Cbg = -Cgg; Cbd = -Cgd; Cbb = -Cgb; One_Third_CoxWL = CoxWL / 3.0; Two_Third_CoxWL = 2.0 * One_Third_CoxWL; AbulkCV = Abulk0 * pParam->BSIM3abulkCVfactor; dAbulkCV_dVb = pParam->BSIM3abulkCVfactor * dAbulk0_dVb; VdsatCV = Vgsteff / AbulkCV; if (VdsatCV < Vds) { dVdsatCV_dVg = 1.0 / AbulkCV; dVdsatCV_dVb = -VdsatCV * dAbulkCV_dVb / AbulkCV; T0 = Vgsteff - VdsatCV / 3.0; dT0_dVg = 1.0 - dVdsatCV_dVg / 3.0; dT0_dVb = -dVdsatCV_dVb / 3.0; qgate += CoxWL * T0; Cgg1 = CoxWL * dT0_dVg; Cgb1 = CoxWL * dT0_dVb + Cgg1 * dVgsteff_dVb; Cgd1 = Cgg1 * dVgsteff_dVd; Cgg1 *= dVgsteff_dVg; Cgg += Cgg1; Cgb += Cgb1; Cgd += Cgd1; T0 = VdsatCV - Vgsteff; dT0_dVg = dVdsatCV_dVg - 1.0; dT0_dVb = dVdsatCV_dVb; qbulk += One_Third_CoxWL * T0; Cbg1 = One_Third_CoxWL * dT0_dVg; Cbb1 = One_Third_CoxWL * dT0_dVb + Cbg1 * dVgsteff_dVb; Cbd1 = Cbg1 * dVgsteff_dVd; Cbg1 *= dVgsteff_dVg; Cbg += Cbg1; Cbb += Cbb1; Cbd += Cbd1; if (model->BSIM3xpart > 0.5) T0 = -Two_Third_CoxWL; else if (model->BSIM3xpart < 0.5) T0 = -0.4 * CoxWL; else T0 = -One_Third_CoxWL; qsrc = T0 * Vgsteff; Csg = T0 * dVgsteff_dVg; Csb = T0 * dVgsteff_dVb; Csd = T0 * dVgsteff_dVd; Cgb *= dVbseff_dVb; Cbb *= dVbseff_dVb; Csb *= dVbseff_dVb; } else { T0 = AbulkCV * Vds; T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1.e-20); T2 = Vds / T1; T3 = T0 * T2; dT3_dVg = -12.0 * T2 * T2 * AbulkCV; dT3_dVd = 6.0 * T0 * (4.0 * Vgsteff - T0) / T1 / T1 - 0.5; dT3_dVb = 12.0 * T2 * T2 * dAbulkCV_dVb * Vgsteff; qgate += CoxWL * (Vgsteff - 0.5 * Vds + T3); Cgg1 = CoxWL * (1.0 + dT3_dVg); Cgb1 = CoxWL * dT3_dVb + Cgg1 * dVgsteff_dVb; Cgd1 = CoxWL * dT3_dVd + Cgg1 * dVgsteff_dVd; Cgg1 *= dVgsteff_dVg; Cgg += Cgg1; Cgb += Cgb1; Cgd += Cgd1; qbulk += CoxWL * (1.0 - AbulkCV) * (0.5 * Vds - T3); Cbg1 = -CoxWL * ((1.0 - AbulkCV) * dT3_dVg); Cbb1 
= -CoxWL * ((1.0 - AbulkCV) * dT3_dVb + (0.5 * Vds - T3) * dAbulkCV_dVb) + Cbg1 * dVgsteff_dVb; Cbd1 = -CoxWL * (1.0 - AbulkCV) * dT3_dVd + Cbg1 * dVgsteff_dVd; Cbg1 *= dVgsteff_dVg; Cbg += Cbg1; Cbb += Cbb1; Cbd += Cbd1; if (model->BSIM3xpart > 0.5) { /* 0/100 Charge petition model */ T1 = T1 + T1; qsrc = -CoxWL * (0.5 * Vgsteff + 0.25 * T0 - T0 * T0 / T1); Csg = -CoxWL * (0.5 + 24.0 * T0 * Vds / T1 / T1 * AbulkCV); Csb = -CoxWL * (0.25 * Vds * dAbulkCV_dVb - 12.0 * T0 * Vds / T1 / T1 * (4.0 * Vgsteff - T0) * dAbulkCV_dVb) + Csg * dVgsteff_dVb; Csd = -CoxWL * (0.25 * AbulkCV - 12.0 * AbulkCV * T0 / T1 / T1 * (4.0 * Vgsteff - T0)) + Csg * dVgsteff_dVd; Csg *= dVgsteff_dVg; } else if (model->BSIM3xpart < 0.5) { /* 40/60 Charge petition model */ T1 = T1 / 12.0; T2 = 0.5 * CoxWL / (T1 * T1); T3 = Vgsteff * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0; qsrc = -T2 * T3; T4 = 4.0 / 3.0 * Vgsteff * (Vgsteff - T0) + 0.4 * T0 * T0; Csg = -2.0 * qsrc / T1 - T2 * (Vgsteff * (3.0 * Vgsteff - 8.0 * T0 / 3.0) + 2.0 * T0 * T0 / 3.0); Csb = (qsrc / T1 * Vds + T2 * T4 * Vds) * dAbulkCV_dVb + Csg * dVgsteff_dVb; Csd = (qsrc / T1 + T2 * T4) * AbulkCV + Csg * dVgsteff_dVd; Csg *= dVgsteff_dVg; } else { /* 50/50 Charge petition model */ qsrc = -0.5 * (qgate + qbulk); Csg = -0.5 * (Cgg1 + Cbg1); Csb = -0.5 * (Cgb1 + Cbb1); Csd = -0.5 * (Cgd1 + Cbd1); } Cgb *= dVbseff_dVb; Cbb *= dVbseff_dVb; Csb *= dVbseff_dVb; } qdrn = -(qgate + qbulk + qsrc); here->BSIM3cggb = Cgg; here->BSIM3cgsb = -(Cgg + Cgd + Cgb); here->BSIM3cgdb = Cgd; here->BSIM3cdgb = -(Cgg + Cbg + Csg); here->BSIM3cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb + Csg + Csd + Csb); here->BSIM3cddb = -(Cgd + Cbd + Csd); here->BSIM3cbgb = Cbg; here->BSIM3cbsb = -(Cbg + Cbd + Cbb); here->BSIM3cbdb = Cbd; here->BSIM3qinv = -(qgate + qbulk); } else if (model->BSIM3capMod == 2) { Vfb = here->BSIM3vfbzb; V3 = Vfb - Vgs_eff + VbseffCV - DELTA_3; if (Vfb <= 0.0) { T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 
* Vfb); T2 = -DELTA_3 / T0; } else { T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * Vfb); T2 = DELTA_3 / T0; } T1 = 0.5 * (1.0 + V3 / T0); Vfbeff = Vfb - 0.5 * (V3 + T0); dVfbeff_dVg = T1 * dVgs_eff_dVg; dVfbeff_dVb = -T1 * dVbseffCV_dVb; Qac0 = CoxWL * (Vfbeff - Vfb); dQac0_dVg = CoxWL * dVfbeff_dVg; dQac0_dVb = CoxWL * dVfbeff_dVb; T0 = 0.5 * pParam->BSIM3k1ox; T3 = Vgs_eff - Vfbeff - VbseffCV - Vgsteff; if (pParam->BSIM3k1ox == 0.0) { T1 = 0.0; T2 = 0.0; } else if (T3 < 0.0) { T1 = T0 + T3 / pParam->BSIM3k1ox; T2 = CoxWL; } else { T1 = sqrt(T0 * T0 + T3); T2 = CoxWL * T0 / T1; } Qsub0 = CoxWL * pParam->BSIM3k1ox * (T1 - T0); dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg); dQsub0_dVd = -T2 * dVgsteff_dVd; dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseffCV_dVb + dVgsteff_dVb); AbulkCV = Abulk0 * pParam->BSIM3abulkCVfactor; dAbulkCV_dVb = pParam->BSIM3abulkCVfactor * dAbulk0_dVb; VdsatCV = Vgsteff / AbulkCV; V4 = VdsatCV - Vds - DELTA_4; T0 = sqrt(V4 * V4 + 4.0 * DELTA_4 * VdsatCV); VdseffCV = VdsatCV - 0.5 * (V4 + T0); T1 = 0.5 * (1.0 + V4 / T0); T2 = DELTA_4 / T0; T3 = (1.0 - T1 - T2) / AbulkCV; dVdseffCV_dVg = T3; dVdseffCV_dVd = T1; dVdseffCV_dVb = -T3 * VdsatCV * dAbulkCV_dVb; /* Added to eliminate non-zero VdseffCV at Vds=0.0 */ if (Vds == 0.0) { VdseffCV = 0.0; dVdseffCV_dVg = 0.0; dVdseffCV_dVb = 0.0; } T0 = AbulkCV * VdseffCV; T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1e-20); T2 = VdseffCV / T1; T3 = T0 * T2; T4 = (1.0 - 12.0 * T2 * T2 * AbulkCV); T5 = (6.0 * T0 * (4.0 * Vgsteff - T0) / (T1 * T1) - 0.5); T6 = 12.0 * T2 * T2 * Vgsteff; qinoi = -CoxWL * (Vgsteff - 0.5 * T0 + AbulkCV * T3); qgate = CoxWL * (Vgsteff - 0.5 * VdseffCV + T3); Cgg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg); Cgd1 = CoxWL * T5 * dVdseffCV_dVd + Cgg1 * dVgsteff_dVd; Cgb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Cgg1 * dVgsteff_dVb; Cgg1 *= dVgsteff_dVg; T7 = 1.0 - AbulkCV; qbulk = CoxWL * T7 * (0.5 * VdseffCV - T3); T4 = -T7 * (T4 - 1.0); T5 = -T7 * T5; T6 = -(T7 * T6 + (0.5 * VdseffCV - 
T3)); Cbg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg); Cbd1 = CoxWL * T5 * dVdseffCV_dVd + Cbg1 * dVgsteff_dVd; Cbb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Cbg1 * dVgsteff_dVb; Cbg1 *= dVgsteff_dVg; if (model->BSIM3xpart > 0.5) { /* 0/100 Charge petition model */ T1 = T1 + T1; qsrc = -CoxWL * (0.5 * Vgsteff + 0.25 * T0 - T0 * T0 / T1); T7 = (4.0 * Vgsteff - T0) / (T1 * T1); T4 = -(0.5 + 24.0 * T0 * T0 / (T1 * T1)); T5 = -(0.25 * AbulkCV - 12.0 * AbulkCV * T0 * T7); T6 = -(0.25 * VdseffCV - 12.0 * T0 * VdseffCV * T7); Csg = CoxWL * (T4 + T5 * dVdseffCV_dVg); Csd = CoxWL * T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd; Csb = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Csg * dVgsteff_dVb; Csg *= dVgsteff_dVg; } else if (model->BSIM3xpart < 0.5) { /* 40/60 Charge petition model */ T1 = T1 / 12.0; T2 = 0.5 * CoxWL / (T1 * T1); T3 = Vgsteff * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0; qsrc = -T2 * T3; T7 = 4.0 / 3.0 * Vgsteff * (Vgsteff - T0) + 0.4 * T0 * T0; T4 = -2.0 * qsrc / T1 - T2 * (Vgsteff * (3.0 * Vgsteff - 8.0 * T0 / 3.0) + 2.0 * T0 * T0 / 3.0); T5 = (qsrc / T1 + T2 * T7) * AbulkCV; T6 = (qsrc / T1 * VdseffCV + T2 * T7 * VdseffCV); Csg = (T4 + T5 * dVdseffCV_dVg); Csd = T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd; Csb = (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Csg * dVgsteff_dVb; Csg *= dVgsteff_dVg; } else { /* 50/50 Charge petition model */ qsrc = -0.5 * (qgate + qbulk); Csg = -0.5 * (Cgg1 + Cbg1); Csb = -0.5 * (Cgb1 + Cbb1); Csd = -0.5 * (Cgd1 + Cbd1); } qgate += Qac0 + Qsub0; qbulk -= (Qac0 + Qsub0); qdrn = -(qgate + qbulk + qsrc); Cgg = dQac0_dVg + dQsub0_dVg + Cgg1; Cgd = dQsub0_dVd + Cgd1; Cgb = dQac0_dVb + dQsub0_dVb + Cgb1; Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg; Cbd = Cbd1 - dQsub0_dVd; Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb; Cgb *= dVbseff_dVb; Cbb *= dVbseff_dVb; Csb *= dVbseff_dVb; here->BSIM3cggb = Cgg; here->BSIM3cgsb = -(Cgg + Cgd + Cgb); here->BSIM3cgdb = Cgd; here->BSIM3cdgb = -(Cgg + Cbg + Csg); 
here->BSIM3cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb + Csg + Csd + Csb); here->BSIM3cddb = -(Cgd + Cbd + Csd); here->BSIM3cbgb = Cbg; here->BSIM3cbsb = -(Cbg + Cbd + Cbb); here->BSIM3cbdb = Cbd; here->BSIM3qinv = qinoi; } /* New Charge-Thickness capMod (CTM) begins */ else if (model->BSIM3capMod == 3) { V3 = here->BSIM3vfbzb - Vgs_eff + VbseffCV - DELTA_3; if (here->BSIM3vfbzb <= 0.0) { T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * here->BSIM3vfbzb); T2 = -DELTA_3 / T0; } else { T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * here->BSIM3vfbzb); T2 = DELTA_3 / T0; } T1 = 0.5 * (1.0 + V3 / T0); Vfbeff = here->BSIM3vfbzb - 0.5 * (V3 + T0); dVfbeff_dVg = T1 * dVgs_eff_dVg; dVfbeff_dVb = -T1 * dVbseffCV_dVb; Cox = model->BSIM3cox; Tox = 1.0e8 * model->BSIM3tox; T0 = (Vgs_eff - VbseffCV - here->BSIM3vfbzb) / Tox; dT0_dVg = dVgs_eff_dVg / Tox; dT0_dVb = -dVbseffCV_dVb / Tox; tmp = T0 * pParam->BSIM3acde; if ((-EXP_THRESHOLD < tmp) && (tmp < EXP_THRESHOLD)) { Tcen = pParam->BSIM3ldeb * exp(tmp); dTcen_dVg = pParam->BSIM3acde * Tcen; dTcen_dVb = dTcen_dVg * dT0_dVb; dTcen_dVg *= dT0_dVg; } else if (tmp <= -EXP_THRESHOLD) { Tcen = pParam->BSIM3ldeb * MIN_EXP; dTcen_dVg = dTcen_dVb = 0.0; } else { Tcen = pParam->BSIM3ldeb * MAX_EXP; dTcen_dVg = dTcen_dVb = 0.0; } LINK = 1.0e-3 * model->BSIM3tox; V3 = pParam->BSIM3ldeb - Tcen - LINK; V4 = sqrt(V3 * V3 + 4.0 * LINK * pParam->BSIM3ldeb); Tcen = pParam->BSIM3ldeb - 0.5 * (V3 + V4); T1 = 0.5 * (1.0 + V3 / V4); dTcen_dVg *= T1; dTcen_dVb *= T1; Ccen = EPSSI / Tcen; T2 = Cox / (Cox + Ccen); Coxeff = T2 * Ccen; T3 = -Ccen / Tcen; dCoxeff_dVg = T2 * T2 * T3; dCoxeff_dVb = dCoxeff_dVg * dTcen_dVb; dCoxeff_dVg *= dTcen_dVg; CoxWLcen = CoxWL * Coxeff / Cox; Qac0 = CoxWLcen * (Vfbeff - here->BSIM3vfbzb); QovCox = Qac0 / Coxeff; dQac0_dVg = CoxWLcen * dVfbeff_dVg + QovCox * dCoxeff_dVg; dQac0_dVb = CoxWLcen * dVfbeff_dVb + QovCox * dCoxeff_dVb; T0 = 0.5 * pParam->BSIM3k1ox; T3 = Vgs_eff - Vfbeff - VbseffCV - Vgsteff; if (pParam->BSIM3k1ox == 0.0) { T1 = 0.0; T2 
= 0.0; } else if (T3 < 0.0) { T1 = T0 + T3 / pParam->BSIM3k1ox; T2 = CoxWLcen; } else { T1 = sqrt(T0 * T0 + T3); T2 = CoxWLcen * T0 / T1; } Qsub0 = CoxWLcen * pParam->BSIM3k1ox * (T1 - T0); QovCox = Qsub0 / Coxeff; dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg) + QovCox * dCoxeff_dVg; dQsub0_dVd = -T2 * dVgsteff_dVd; dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseffCV_dVb + dVgsteff_dVb) + QovCox * dCoxeff_dVb; /* Gate-bias dependent delta Phis begins */ if (pParam->BSIM3k1ox <= 0.0) { Denomi = 0.25 * pParam->BSIM3moin * Vtm; T0 = 0.5 * pParam->BSIM3sqrtPhi; } else { Denomi = pParam->BSIM3moin * Vtm * pParam->BSIM3k1ox * pParam->BSIM3k1ox; T0 = pParam->BSIM3k1ox * pParam->BSIM3sqrtPhi; } T1 = 2.0 * T0 + Vgsteff; DeltaPhi = Vtm * log(1.0 + T1 * Vgsteff / Denomi); dDeltaPhi_dVg = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff); /* End of delta Phis */ /* VgDP = Vgsteff - DeltaPhi */ T0 = Vgsteff - DeltaPhi - 0.001; dT0_dVg = 1.0 - dDeltaPhi_dVg; T1 = sqrt(T0 * T0 + Vgsteff * 0.004); VgDP = 0.5 * (T0 + T1); dVgDP_dVg = 0.5 * (dT0_dVg + (T0 * dT0_dVg + 0.002) / T1); T3 = 4.0 * (Vth - here->BSIM3vfbzb - pParam->BSIM3phi); Tox += Tox; if (T3 >= 0.0) { T0 = (Vgsteff + T3) / Tox; dT0_dVd = (dVgsteff_dVd + 4.0 * dVth_dVd) / Tox; dT0_dVb = (dVgsteff_dVb + 4.0 * dVth_dVb) / Tox; } else { T0 = (Vgsteff + 1.0e-20) / Tox; dT0_dVd = dVgsteff_dVd / Tox; dT0_dVb = dVgsteff_dVb / Tox; } tmp = exp(0.7 * log(T0)); T1 = 1.0 + tmp; T2 = 0.7 * tmp / (T0 * Tox); Tcen = 1.9e-9 / T1; dTcen_dVg = -1.9e-9 * T2 / T1 /T1; dTcen_dVd = Tox * dTcen_dVg; dTcen_dVb = dTcen_dVd * dT0_dVb; dTcen_dVd *= dT0_dVd; dTcen_dVg *= dVgsteff_dVg; Ccen = EPSSI / Tcen; T0 = Cox / (Cox + Ccen); Coxeff = T0 * Ccen; T1 = -Ccen / Tcen; dCoxeff_dVg = T0 * T0 * T1; dCoxeff_dVd = dCoxeff_dVg * dTcen_dVd; dCoxeff_dVb = dCoxeff_dVg * dTcen_dVb; dCoxeff_dVg *= dTcen_dVg; CoxWLcen = CoxWL * Coxeff / Cox; AbulkCV = Abulk0 * pParam->BSIM3abulkCVfactor; dAbulkCV_dVb = pParam->BSIM3abulkCVfactor * dAbulk0_dVb; VdsatCV = 
VgDP / AbulkCV; T0 = VdsatCV - Vds - DELTA_4; dT0_dVg = dVgDP_dVg / AbulkCV; dT0_dVb = -VdsatCV * dAbulkCV_dVb / AbulkCV; T1 = sqrt(T0 * T0 + 4.0 * DELTA_4 * VdsatCV); dT1_dVg = (T0 + DELTA_4 + DELTA_4) / T1; dT1_dVd = -T0 / T1; dT1_dVb = dT1_dVg * dT0_dVb; dT1_dVg *= dT0_dVg; if (T0 >= 0.0) { VdseffCV = VdsatCV - 0.5 * (T0 + T1); dVdseffCV_dVg = 0.5 * (dT0_dVg - dT1_dVg); dVdseffCV_dVd = 0.5 * (1.0 - dT1_dVd); dVdseffCV_dVb = 0.5 * (dT0_dVb - dT1_dVb); } else { T3 = (DELTA_4 + DELTA_4) / (T1 - T0); T4 = 1.0 - T3; T5 = VdsatCV * T3 / (T1 - T0); VdseffCV = VdsatCV * T4; dVdseffCV_dVg = dT0_dVg * T4 + T5 * (dT1_dVg - dT0_dVg); dVdseffCV_dVd = T5 * (dT1_dVd + 1.0); dVdseffCV_dVb = dT0_dVb * (1.0 - T5) + T5 * dT1_dVb; } /* Added to eliminate non-zero VdseffCV at Vds=0.0 */ if (Vds == 0.0) { VdseffCV = 0.0; dVdseffCV_dVg = 0.0; dVdseffCV_dVb = 0.0; } T0 = AbulkCV * VdseffCV; T1 = VgDP; T2 = 12.0 * (T1 - 0.5 * T0 + 1.0e-20); T3 = T0 / T2; T4 = 1.0 - 12.0 * T3 * T3; T5 = AbulkCV * (6.0 * T0 * (4.0 * T1 - T0) / (T2 * T2) - 0.5); T6 = T5 * VdseffCV / AbulkCV; qgate = qinoi = CoxWLcen * (T1 - T0 * (0.5 - T3)); QovCox = qgate / Coxeff; Cgg1 = CoxWLcen * (T4 * dVgDP_dVg + T5 * dVdseffCV_dVg); Cgd1 = CoxWLcen * T5 * dVdseffCV_dVd + Cgg1 * dVgsteff_dVd + QovCox * dCoxeff_dVd; Cgb1 = CoxWLcen * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Cgg1 * dVgsteff_dVb + QovCox * dCoxeff_dVb; Cgg1 = Cgg1 * dVgsteff_dVg + QovCox * dCoxeff_dVg; T7 = 1.0 - AbulkCV; T8 = T2 * T2; T9 = 12.0 * T7 * T0 * T0 / (T8 * AbulkCV); T10 = T9 * dVgDP_dVg; T11 = -T7 * T5 / AbulkCV; T12 = -(T9 * T1 / AbulkCV + VdseffCV * (0.5 - T0 / T2)); qbulk = CoxWLcen * T7 * (0.5 * VdseffCV - T0 * VdseffCV / T2); QovCox = qbulk / Coxeff; Cbg1 = CoxWLcen * (T10 + T11 * dVdseffCV_dVg); Cbd1 = CoxWLcen * T11 * dVdseffCV_dVd + Cbg1 * dVgsteff_dVd + QovCox * dCoxeff_dVd; Cbb1 = CoxWLcen * (T11 * dVdseffCV_dVb + T12 * dAbulkCV_dVb) + Cbg1 * dVgsteff_dVb + QovCox * dCoxeff_dVb; Cbg1 = Cbg1 * dVgsteff_dVg + QovCox * dCoxeff_dVg; 
if (model->BSIM3xpart > 0.5) { /* 0/100 partition */ qsrc = -CoxWLcen * (T1 / 2.0 + T0 / 4.0 - 0.5 * T0 * T0 / T2); QovCox = qsrc / Coxeff; T2 += T2; T3 = T2 * T2; T7 = -(0.25 - 12.0 * T0 * (4.0 * T1 - T0) / T3); T4 = -(0.5 + 24.0 * T0 * T0 / T3) * dVgDP_dVg; T5 = T7 * AbulkCV; T6 = T7 * VdseffCV; Csg = CoxWLcen * (T4 + T5 * dVdseffCV_dVg); Csd = CoxWLcen * T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd + QovCox * dCoxeff_dVd; Csb = CoxWLcen * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Csg * dVgsteff_dVb + QovCox * dCoxeff_dVb; Csg = Csg * dVgsteff_dVg + QovCox * dCoxeff_dVg; } else if (model->BSIM3xpart < 0.5) { /* 40/60 partition */ T2 = T2 / 12.0; T3 = 0.5 * CoxWLcen / (T2 * T2); T4 = T1 * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0 * T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0; qsrc = -T3 * T4; QovCox = qsrc / Coxeff; T8 = 4.0 / 3.0 * T1 * (T1 - T0) + 0.4 * T0 * T0; T5 = -2.0 * qsrc / T2 - T3 * (T1 * (3.0 * T1 - 8.0 * T0 / 3.0) + 2.0 * T0 * T0 / 3.0); T6 = AbulkCV * (qsrc / T2 + T3 * T8); T7 = T6 * VdseffCV / AbulkCV; Csg = T5 * dVgDP_dVg + T6 * dVdseffCV_dVg; Csd = Csg * dVgsteff_dVd + T6 * dVdseffCV_dVd + QovCox * dCoxeff_dVd; Csb = Csg * dVgsteff_dVb + T6 * dVdseffCV_dVb + T7 * dAbulkCV_dVb + QovCox * dCoxeff_dVb; Csg = Csg * dVgsteff_dVg + QovCox * dCoxeff_dVg; } else { /* 50/50 partition */ qsrc = -0.5 * qgate; Csg = -0.5 * Cgg1; Csd = -0.5 * Cgd1; Csb = -0.5 * Cgb1; } qgate += Qac0 + Qsub0 - qbulk; qbulk -= (Qac0 + Qsub0); qdrn = -(qgate + qbulk + qsrc); Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg; Cbd = Cbd1 - dQsub0_dVd; Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb; Cgg = Cgg1 - Cbg; Cgd = Cgd1 - Cbd; Cgb = Cgb1 - Cbb; Cgb *= dVbseff_dVb; Cbb *= dVbseff_dVb; Csb *= dVbseff_dVb; here->BSIM3cggb = Cgg; here->BSIM3cgsb = -(Cgg + Cgd + Cgb); here->BSIM3cgdb = Cgd; here->BSIM3cdgb = -(Cgg + Cbg + Csg); here->BSIM3cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb + Csg + Csd + Csb); here->BSIM3cddb = -(Cgd + Cbd + Csd); here->BSIM3cbgb = Cbg; here->BSIM3cbsb = -(Cbg + Cbd + Cbb); here->BSIM3cbdb = 
Cbd; here->BSIM3qinv = -qinoi; } /* End of CTM */ } finished: /* Returning Values to Calling Routine */ /* * COMPUTE EQUIVALENT DRAIN CURRENT SOURCE */ here->BSIM3qgate = qgate; here->BSIM3qbulk = qbulk; here->BSIM3qdrn = qdrn; here->BSIM3cd = cdrain; if (ChargeComputationNeeded) { /* charge storage elements * bulk-drain and bulk-source depletion capacitances * czbd : zero bias drain junction capacitance * czbs : zero bias source junction capacitance * czbdsw: zero bias drain junction sidewall capacitance along field oxide * czbssw: zero bias source junction sidewall capacitance along field oxide * czbdswg: zero bias drain junction sidewall capacitance along gate side * czbsswg: zero bias source junction sidewall capacitance along gate side */ if (model->BSIM3acmMod == 0) { czbd = model->BSIM3unitAreaTempJctCap * here->BSIM3drainArea; /*bug fix */ czbs = model->BSIM3unitAreaTempJctCap * here->BSIM3sourceArea; if (here->BSIM3drainPerimeter < pParam->BSIM3weff) { czbdswg = model->BSIM3unitLengthGateSidewallTempJctCap * here->BSIM3drainPerimeter; czbdsw = 0.0; } else { czbdsw = model->BSIM3unitLengthSidewallTempJctCap * (here->BSIM3drainPerimeter - pParam->BSIM3weff); czbdswg = model->BSIM3unitLengthGateSidewallTempJctCap * pParam->BSIM3weff; } if (here->BSIM3sourcePerimeter < pParam->BSIM3weff) { czbssw = 0.0; czbsswg = model->BSIM3unitLengthGateSidewallTempJctCap * here->BSIM3sourcePerimeter; } else { czbssw = model->BSIM3unitLengthSidewallTempJctCap * (here->BSIM3sourcePerimeter - pParam->BSIM3weff); czbsswg = model->BSIM3unitLengthGateSidewallTempJctCap * pParam->BSIM3weff; } } else { error = ACM_junctionCapacitances( model->BSIM3acmMod, model->BSIM3calcacm, here->BSIM3geo, model->BSIM3hdif, model->BSIM3wmlt, here->BSIM3w, model->BSIM3xw, here->BSIM3drainAreaGiven, here->BSIM3drainArea, here->BSIM3drainPerimeterGiven, here->BSIM3drainPerimeter, here->BSIM3sourceAreaGiven, here->BSIM3sourceArea, here->BSIM3sourcePerimeterGiven, here->BSIM3sourcePerimeter, 
model->BSIM3unitAreaTempJctCap, model->BSIM3unitLengthSidewallTempJctCap, model->BSIM3unitLengthGateSidewallJctCap, &czbd, &czbdsw, &czbdswg, &czbs, &czbssw, &czbsswg ); if (error) return(error); } MJ = model->BSIM3bulkJctBotGradingCoeff; MJSW = model->BSIM3bulkJctSideGradingCoeff; MJSWG = model->BSIM3bulkJctGateSideGradingCoeff; /* Source Bulk Junction */ if (vbs == 0.0) { *(ckt->CKTstate0 + here->BSIM3qbs) = 0.0; here->BSIM3capbs = czbs + czbssw + czbsswg; } else if (vbs < 0.0) { if (czbs > 0.0) { arg = 1.0 - vbs / model->BSIM3PhiB; if (MJ == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJ * log(arg)); *(ckt->CKTstate0 + here->BSIM3qbs) = model->BSIM3PhiB * czbs * (1.0 - arg * sarg) / (1.0 - MJ); here->BSIM3capbs = czbs * sarg; } else { *(ckt->CKTstate0 + here->BSIM3qbs) = 0.0; here->BSIM3capbs = 0.0; } if (czbssw > 0.0) { arg = 1.0 - vbs / model->BSIM3PhiBSW; if (MJSW == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJSW * log(arg)); *(ckt->CKTstate0 + here->BSIM3qbs) += model->BSIM3PhiBSW * czbssw * (1.0 - arg * sarg) / (1.0 - MJSW); here->BSIM3capbs += czbssw * sarg; } if (czbsswg > 0.0) { arg = 1.0 - vbs / model->BSIM3PhiBSWG; if (MJSWG == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJSWG * log(arg)); *(ckt->CKTstate0 + here->BSIM3qbs) += model->BSIM3PhiBSWG * czbsswg * (1.0 - arg * sarg) / (1.0 - MJSWG); here->BSIM3capbs += czbsswg * sarg; } } else { T0 = czbs + czbssw + czbsswg; T1 = vbs * (czbs * MJ / model->BSIM3PhiB + czbssw * MJSW / model->BSIM3PhiBSW + czbsswg * MJSWG / model->BSIM3PhiBSWG); *(ckt->CKTstate0 + here->BSIM3qbs) = vbs * (T0 + 0.5 * T1); here->BSIM3capbs = T0 + T1; } /* Drain Bulk Junction */ if (vbd == 0.0) { *(ckt->CKTstate0 + here->BSIM3qbd) = 0.0; here->BSIM3capbd = czbd + czbdsw + czbdswg; } else if (vbd < 0.0) { if (czbd > 0.0) { arg = 1.0 - vbd / model->BSIM3PhiB; if (MJ == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJ * log(arg)); *(ckt->CKTstate0 + here->BSIM3qbd) = model->BSIM3PhiB * czbd * (1.0 - arg * sarg) / (1.0 - MJ); 
here->BSIM3capbd = czbd * sarg; } else { *(ckt->CKTstate0 + here->BSIM3qbd) = 0.0; here->BSIM3capbd = 0.0; } if (czbdsw > 0.0) { arg = 1.0 - vbd / model->BSIM3PhiBSW; if (MJSW == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJSW * log(arg)); *(ckt->CKTstate0 + here->BSIM3qbd) += model->BSIM3PhiBSW * czbdsw * (1.0 - arg * sarg) / (1.0 - MJSW); here->BSIM3capbd += czbdsw * sarg; } if (czbdswg > 0.0) { arg = 1.0 - vbd / model->BSIM3PhiBSWG; if (MJSWG == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJSWG * log(arg)); *(ckt->CKTstate0 + here->BSIM3qbd) += model->BSIM3PhiBSWG * czbdswg * (1.0 - arg * sarg) / (1.0 - MJSWG); here->BSIM3capbd += czbdswg * sarg; } } else { T0 = czbd + czbdsw + czbdswg; T1 = vbd * (czbd * MJ / model->BSIM3PhiB + czbdsw * MJSW / model->BSIM3PhiBSW + czbdswg * MJSWG / model->BSIM3PhiBSWG); *(ckt->CKTstate0 + here->BSIM3qbd) = vbd * (T0 + 0.5 * T1); here->BSIM3capbd = T0 + T1; } } /* * check convergence */ if ((here->BSIM3off == 0) || (!(ckt->CKTmode & MODEINITFIX))) { if (Check == 1) { ckt->CKTnoncon++; #ifndef NEWCONV } else { if (here->BSIM3mode >= 0) { Idtot = here->BSIM3cd + here->BSIM3csub - here->BSIM3cbd; } else { Idtot = here->BSIM3cd - here->BSIM3cbd; } tol = ckt->CKTreltol * MAX(fabs(cdhat), fabs(Idtot)) + ckt->CKTabstol; if (fabs(cdhat - Idtot) >= tol) { ckt->CKTnoncon++; } else { Ibtot = here->BSIM3cbs + here->BSIM3cbd - here->BSIM3csub; tol = ckt->CKTreltol * MAX(fabs(cbhat), fabs(Ibtot)) + ckt->CKTabstol; if (fabs(cbhat - Ibtot) > tol) { ckt->CKTnoncon++; } } #endif /* NEWCONV */ } } *(ckt->CKTstate0 + here->BSIM3vbs) = vbs; *(ckt->CKTstate0 + here->BSIM3vbd) = vbd; *(ckt->CKTstate0 + here->BSIM3vgs) = vgs; *(ckt->CKTstate0 + here->BSIM3vds) = vds; *(ckt->CKTstate0 + here->BSIM3qdef) = qdef; /* bulk and channel charge plus overlaps */ if (!ChargeComputationNeeded) goto line850; #ifndef NOBYPASS line755: #endif /* NQS begins */ if ((here->BSIM3nqsMod) || (here->BSIM3acnqsMod)) { qcheq = -(qbulk + qgate); here->BSIM3cqgb = 
-(here->BSIM3cggb + here->BSIM3cbgb); here->BSIM3cqdb = -(here->BSIM3cgdb + here->BSIM3cbdb); here->BSIM3cqsb = -(here->BSIM3cgsb + here->BSIM3cbsb); here->BSIM3cqbb = -(here->BSIM3cqgb + here->BSIM3cqdb + here->BSIM3cqsb); gtau_drift = fabs(here->BSIM3tconst * qcheq) * ScalingFactor; T0 = pParam->BSIM3leffCV * pParam->BSIM3leffCV; gtau_diff = 16.0 * here->BSIM3u0temp * model->BSIM3vtm / T0 * ScalingFactor; here->BSIM3gtau = gtau_drift + gtau_diff; if (here->BSIM3acnqsMod) here->BSIM3taunet = ScalingFactor / here->BSIM3gtau; } if (model->BSIM3capMod == 0) /* code merge -JX */ { cgdo = pParam->BSIM3cgdo; qgdo = pParam->BSIM3cgdo * vgd; cgso = pParam->BSIM3cgso; qgso = pParam->BSIM3cgso * vgs; } else if (model->BSIM3capMod == 1) { if (vgd < 0.0) { T1 = sqrt(1.0 - 4.0 * vgd / pParam->BSIM3ckappa); cgdo = pParam->BSIM3cgdo + pParam->BSIM3weffCV * pParam->BSIM3cgdl / T1; qgdo = pParam->BSIM3cgdo * vgd - pParam->BSIM3weffCV * 0.5 * pParam->BSIM3cgdl * pParam->BSIM3ckappa * (T1 - 1.0); } else { cgdo = pParam->BSIM3cgdo + pParam->BSIM3weffCV * pParam->BSIM3cgdl; qgdo = (pParam->BSIM3weffCV * pParam->BSIM3cgdl + pParam->BSIM3cgdo) * vgd; } if (vgs < 0.0) { T1 = sqrt(1.0 - 4.0 * vgs / pParam->BSIM3ckappa); cgso = pParam->BSIM3cgso + pParam->BSIM3weffCV * pParam->BSIM3cgsl / T1; qgso = pParam->BSIM3cgso * vgs - pParam->BSIM3weffCV * 0.5 * pParam->BSIM3cgsl * pParam->BSIM3ckappa * (T1 - 1.0); } else { cgso = pParam->BSIM3cgso + pParam->BSIM3weffCV * pParam->BSIM3cgsl; qgso = (pParam->BSIM3weffCV * pParam->BSIM3cgsl + pParam->BSIM3cgso) * vgs; } } else { T0 = vgd + DELTA_1; T1 = sqrt(T0 * T0 + 4.0 * DELTA_1); T2 = 0.5 * (T0 - T1); T3 = pParam->BSIM3weffCV * pParam->BSIM3cgdl; T4 = sqrt(1.0 - 4.0 * T2 / pParam->BSIM3ckappa); cgdo = pParam->BSIM3cgdo + T3 - T3 * (1.0 - 1.0 / T4) * (0.5 - 0.5 * T0 / T1); qgdo = (pParam->BSIM3cgdo + T3) * vgd - T3 * (T2 + 0.5 * pParam->BSIM3ckappa * (T4 - 1.0)); T0 = vgs + DELTA_1; T1 = sqrt(T0 * T0 + 4.0 * DELTA_1); T2 = 0.5 * (T0 - T1); T3 = 
pParam->BSIM3weffCV * pParam->BSIM3cgsl; T4 = sqrt(1.0 - 4.0 * T2 / pParam->BSIM3ckappa); cgso = pParam->BSIM3cgso + T3 - T3 * (1.0 - 1.0 / T4) * (0.5 - 0.5 * T0 / T1); qgso = (pParam->BSIM3cgso + T3) * vgs - T3 * (T2 + 0.5 * pParam->BSIM3ckappa * (T4 - 1.0)); } here->BSIM3cgdo = cgdo; here->BSIM3cgso = cgso; ag0 = ckt->CKTag[0]; if (here->BSIM3mode > 0) { if (here->BSIM3nqsMod == 0) { gcggb = (here->BSIM3cggb + cgdo + cgso + pParam->BSIM3cgbo ) * ag0; gcgdb = (here->BSIM3cgdb - cgdo) * ag0; gcgsb = (here->BSIM3cgsb - cgso) * ag0; gcdgb = (here->BSIM3cdgb - cgdo) * ag0; gcddb = (here->BSIM3cddb + here->BSIM3capbd + cgdo) * ag0; gcdsb = here->BSIM3cdsb * ag0; gcsgb = -(here->BSIM3cggb + here->BSIM3cbgb + here->BSIM3cdgb + cgso) * ag0; gcsdb = -(here->BSIM3cgdb + here->BSIM3cbdb + here->BSIM3cddb) * ag0; gcssb = (here->BSIM3capbs + cgso - (here->BSIM3cgsb + here->BSIM3cbsb + here->BSIM3cdsb)) * ag0; gcbgb = (here->BSIM3cbgb - pParam->BSIM3cgbo) * ag0; gcbdb = (here->BSIM3cbdb - here->BSIM3capbd) * ag0; gcbsb = (here->BSIM3cbsb - here->BSIM3capbs) * ag0; qgd = qgdo; qgs = qgso; qgb = pParam->BSIM3cgbo * vgb; qgate += qgd + qgs + qgb; qbulk -= qgb; qdrn -= qgd; qsrc = -(qgate + qbulk + qdrn); ggtg = ggtd = ggtb = ggts = 0.0; sxpart = 0.6; dxpart = 0.4; ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0; dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0; } else { if (qcheq > 0.0) T0 = here->BSIM3tconst * qdef * ScalingFactor; else T0 = -here->BSIM3tconst * qdef * ScalingFactor; ggtg = here->BSIM3gtg = T0 * here->BSIM3cqgb; ggtd = here->BSIM3gtd = T0 * here->BSIM3cqdb; ggts = here->BSIM3gts = T0 * here->BSIM3cqsb; ggtb = here->BSIM3gtb = T0 * here->BSIM3cqbb; gqdef = ScalingFactor * ag0; gcqgb = here->BSIM3cqgb * ag0; gcqdb = here->BSIM3cqdb * ag0; gcqsb = here->BSIM3cqsb * ag0; gcqbb = here->BSIM3cqbb * ag0; gcggb = (cgdo + cgso + pParam->BSIM3cgbo ) * ag0; gcgdb = -cgdo * ag0; gcgsb = -cgso * ag0; gcdgb = -cgdo * ag0; gcddb = (here->BSIM3capbd + 
cgdo) * ag0; gcdsb = 0.0; gcsgb = -cgso * ag0; gcsdb = 0.0; gcssb = (here->BSIM3capbs + cgso) * ag0; gcbgb = -pParam->BSIM3cgbo * ag0; gcbdb = -here->BSIM3capbd * ag0; gcbsb = -here->BSIM3capbs * ag0; CoxWL = model->BSIM3cox * pParam->BSIM3weffCV * pParam->BSIM3leffCV; if (fabs(qcheq) <= 1.0e-5 * CoxWL) { if (model->BSIM3xpart < 0.5) { dxpart = 0.4; } else if (model->BSIM3xpart > 0.5) { dxpart = 0.0; } else { dxpart = 0.5; } ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0; } else { dxpart = qdrn / qcheq; Cdd = here->BSIM3cddb; Csd = -(here->BSIM3cgdb + here->BSIM3cddb + here->BSIM3cbdb); ddxpart_dVd = (Cdd - dxpart * (Cdd + Csd)) / qcheq; Cdg = here->BSIM3cdgb; Csg = -(here->BSIM3cggb + here->BSIM3cdgb + here->BSIM3cbgb); ddxpart_dVg = (Cdg - dxpart * (Cdg + Csg)) / qcheq; Cds = here->BSIM3cdsb; Css = -(here->BSIM3cgsb + here->BSIM3cdsb + here->BSIM3cbsb); ddxpart_dVs = (Cds - dxpart * (Cds + Css)) / qcheq; ddxpart_dVb = -(ddxpart_dVd + ddxpart_dVg + ddxpart_dVs); } sxpart = 1.0 - dxpart; dsxpart_dVd = -ddxpart_dVd; dsxpart_dVg = -ddxpart_dVg; dsxpart_dVs = -ddxpart_dVs; dsxpart_dVb = -(dsxpart_dVd + dsxpart_dVg + dsxpart_dVs); qgd = qgdo; qgs = qgso; qgb = pParam->BSIM3cgbo * vgb; qgate = qgd + qgs + qgb; qbulk = -qgb; qdrn = -qgd; qsrc = -(qgate + qbulk + qdrn); } } else { if (here->BSIM3nqsMod == 0) { gcggb = (here->BSIM3cggb + cgdo + cgso + pParam->BSIM3cgbo ) * ag0; gcgdb = (here->BSIM3cgsb - cgdo) * ag0; gcgsb = (here->BSIM3cgdb - cgso) * ag0; gcdgb = -(here->BSIM3cggb + here->BSIM3cbgb + here->BSIM3cdgb + cgdo) * ag0; gcddb = (here->BSIM3capbd + cgdo - (here->BSIM3cgsb + here->BSIM3cbsb + here->BSIM3cdsb)) * ag0; gcdsb = -(here->BSIM3cgdb + here->BSIM3cbdb + here->BSIM3cddb) * ag0; gcsgb = (here->BSIM3cdgb - cgso) * ag0; gcsdb = here->BSIM3cdsb * ag0; gcssb = (here->BSIM3cddb + here->BSIM3capbs + cgso) * ag0; gcbgb = (here->BSIM3cbgb - pParam->BSIM3cgbo) * ag0; gcbdb = (here->BSIM3cbsb - here->BSIM3capbd) * ag0; gcbsb = (here->BSIM3cbdb - 
here->BSIM3capbs) * ag0; qgd = qgdo; qgs = qgso; qgb = pParam->BSIM3cgbo * vgb; qgate += qgd + qgs + qgb; qbulk -= qgb; qsrc = qdrn - qgs; qdrn = -(qgate + qbulk + qsrc); ggtg = ggtd = ggtb = ggts = 0.0; sxpart = 0.4; dxpart = 0.6; ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0; dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0; } else { if (qcheq > 0.0) T0 = here->BSIM3tconst * qdef * ScalingFactor; else T0 = -here->BSIM3tconst * qdef * ScalingFactor; ggtg = here->BSIM3gtg = T0 * here->BSIM3cqgb; ggts = here->BSIM3gtd = T0 * here->BSIM3cqdb; ggtd = here->BSIM3gts = T0 * here->BSIM3cqsb; ggtb = here->BSIM3gtb = T0 * here->BSIM3cqbb; gqdef = ScalingFactor * ag0; gcqgb = here->BSIM3cqgb * ag0; gcqdb = here->BSIM3cqsb * ag0; gcqsb = here->BSIM3cqdb * ag0; gcqbb = here->BSIM3cqbb * ag0; gcggb = (cgdo + cgso + pParam->BSIM3cgbo) * ag0; gcgdb = -cgdo * ag0; gcgsb = -cgso * ag0; gcdgb = -cgdo * ag0; gcddb = (here->BSIM3capbd + cgdo) * ag0; gcdsb = 0.0; gcsgb = -cgso * ag0; gcsdb = 0.0; gcssb = (here->BSIM3capbs + cgso) * ag0; gcbgb = -pParam->BSIM3cgbo * ag0; gcbdb = -here->BSIM3capbd * ag0; gcbsb = -here->BSIM3capbs * ag0; CoxWL = model->BSIM3cox * pParam->BSIM3weffCV * pParam->BSIM3leffCV; if (fabs(qcheq) <= 1.0e-5 * CoxWL) { if (model->BSIM3xpart < 0.5) { sxpart = 0.4; } else if (model->BSIM3xpart > 0.5) { sxpart = 0.0; } else { sxpart = 0.5; } dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0; } else { sxpart = qdrn / qcheq; Css = here->BSIM3cddb; Cds = -(here->BSIM3cgdb + here->BSIM3cddb + here->BSIM3cbdb); dsxpart_dVs = (Css - sxpart * (Css + Cds)) / qcheq; Csg = here->BSIM3cdgb; Cdg = -(here->BSIM3cggb + here->BSIM3cdgb + here->BSIM3cbgb); dsxpart_dVg = (Csg - sxpart * (Csg + Cdg)) / qcheq; Csd = here->BSIM3cdsb; Cdd = -(here->BSIM3cgsb + here->BSIM3cdsb + here->BSIM3cbsb); dsxpart_dVd = (Csd - sxpart * (Csd + Cdd)) / qcheq; dsxpart_dVb = -(dsxpart_dVd + dsxpart_dVg + dsxpart_dVs); } dxpart = 1.0 - sxpart; ddxpart_dVd = 
-dsxpart_dVd; ddxpart_dVg = -dsxpart_dVg; ddxpart_dVs = -dsxpart_dVs; ddxpart_dVb = -(ddxpart_dVd + ddxpart_dVg + ddxpart_dVs); qgd = qgdo; qgs = qgso; qgb = pParam->BSIM3cgbo * vgb; qgate = qgd + qgs + qgb; qbulk = -qgb; qsrc = -qgs; qdrn = -(qgate + qbulk + qsrc); } } cqdef = cqcheq = 0.0; if (ByPass) goto line860; *(ckt->CKTstate0 + here->BSIM3qg) = qgate; *(ckt->CKTstate0 + here->BSIM3qd) = qdrn - *(ckt->CKTstate0 + here->BSIM3qbd); *(ckt->CKTstate0 + here->BSIM3qb) = qbulk + *(ckt->CKTstate0 + here->BSIM3qbd) + *(ckt->CKTstate0 + here->BSIM3qbs); if (here->BSIM3nqsMod) { *(ckt->CKTstate0 + here->BSIM3qcdump) = qdef * ScalingFactor; *(ckt->CKTstate0 + here->BSIM3qcheq) = qcheq; } /* store small signal parameters */ if (ckt->CKTmode & MODEINITSMSIG) { goto line1000; } if (!ChargeComputationNeeded) goto line850; if (ckt->CKTmode & MODEINITTRAN) { *(ckt->CKTstate1 + here->BSIM3qb) = *(ckt->CKTstate0 + here->BSIM3qb); *(ckt->CKTstate1 + here->BSIM3qg) = *(ckt->CKTstate0 + here->BSIM3qg); *(ckt->CKTstate1 + here->BSIM3qd) = *(ckt->CKTstate0 + here->BSIM3qd); if (here->BSIM3nqsMod) { *(ckt->CKTstate1 + here->BSIM3qcheq) = *(ckt->CKTstate0 + here->BSIM3qcheq); *(ckt->CKTstate1 + here->BSIM3qcdump) = *(ckt->CKTstate0 + here->BSIM3qcdump); } } error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3qb); if (error) return(error); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3qg); if (error) return(error); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3qd); if (error) return(error); if (here->BSIM3nqsMod) { error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3qcdump); if (error) return(error); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3qcheq); if (error) return(error); } goto line860; line850: /* initialize to zero charge conductance and current */ ceqqg = ceqqb = ceqqd = 0.0; cqcheq = cqdef = 0.0; gcdgb = gcddb = gcdsb = 0.0; gcsgb = gcsdb = gcssb = 0.0; gcggb = gcgdb = gcgsb = 0.0; gcbgb = gcbdb = gcbsb = 0.0; gqdef = gcqgb = gcqdb = gcqsb = gcqbb = 
0.0; ggtg = ggtd = ggtb = ggts = 0.0; sxpart = (1.0 - (dxpart = (here->BSIM3mode > 0) ? 0.4 : 0.6)); ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0; dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0; if (here->BSIM3nqsMod) here->BSIM3gtau = 16.0 * here->BSIM3u0temp * model->BSIM3vtm / pParam->BSIM3leffCV / pParam->BSIM3leffCV * ScalingFactor; else here->BSIM3gtau = 0.0; goto line900; line860: /* evaluate equivalent charge current */ cqgate = *(ckt->CKTstate0 + here->BSIM3cqg); cqbulk = *(ckt->CKTstate0 + here->BSIM3cqb); cqdrn = *(ckt->CKTstate0 + here->BSIM3cqd); ceqqg = cqgate - gcggb * vgb + gcgdb * vbd + gcgsb * vbs; ceqqb = cqbulk - gcbgb * vgb + gcbdb * vbd + gcbsb * vbs; ceqqd = cqdrn - gcdgb * vgb + gcddb * vbd + gcdsb * vbs; if (here->BSIM3nqsMod) { T0 = ggtg * vgb - ggtd * vbd - ggts * vbs; ceqqg += T0; T1 = qdef * here->BSIM3gtau; ceqqd -= dxpart * T0 + T1 * (ddxpart_dVg * vgb - ddxpart_dVd * vbd - ddxpart_dVs * vbs); cqdef = *(ckt->CKTstate0 + here->BSIM3cqcdump) - gqdef * qdef; cqcheq = *(ckt->CKTstate0 + here->BSIM3cqcheq) - (gcqgb * vgb - gcqdb * vbd - gcqsb * vbs) + T0; } if (ckt->CKTmode & MODEINITTRAN) { *(ckt->CKTstate1 + here->BSIM3cqb) = *(ckt->CKTstate0 + here->BSIM3cqb); *(ckt->CKTstate1 + here->BSIM3cqg) = *(ckt->CKTstate0 + here->BSIM3cqg); *(ckt->CKTstate1 + here->BSIM3cqd) = *(ckt->CKTstate0 + here->BSIM3cqd); if (here->BSIM3nqsMod) { *(ckt->CKTstate1 + here->BSIM3cqcheq) = *(ckt->CKTstate0 + here->BSIM3cqcheq); *(ckt->CKTstate1 + here->BSIM3cqcdump) = *(ckt->CKTstate0 + here->BSIM3cqcdump); } } /* * load current vector */ line900: if (here->BSIM3mode >= 0) { Gm = here->BSIM3gm; Gmbs = here->BSIM3gmbs; FwdSum = Gm + Gmbs; RevSum = 0.0; cdreq = model->BSIM3type * (cdrain - here->BSIM3gds * vds - Gm * vgs - Gmbs * vbs); ceqbd = -model->BSIM3type * (here->BSIM3csub - here->BSIM3gbds * vds - here->BSIM3gbgs * vgs - here->BSIM3gbbs * vbs); ceqbs = 0.0; gbbdp = -here->BSIM3gbds; gbbsp = (here->BSIM3gbds + here->BSIM3gbgs 
+ here->BSIM3gbbs); gbdpg = here->BSIM3gbgs; gbdpdp = here->BSIM3gbds; gbdpb = here->BSIM3gbbs; gbdpsp = -(gbdpg + gbdpdp + gbdpb); gbspg = 0.0; gbspdp = 0.0; gbspb = 0.0; gbspsp = 0.0; } else { Gm = -here->BSIM3gm; Gmbs = -here->BSIM3gmbs; FwdSum = 0.0; RevSum = -(Gm + Gmbs); cdreq = -model->BSIM3type * (cdrain + here->BSIM3gds * vds + Gm * vgd + Gmbs * vbd); ceqbs = -model->BSIM3type * (here->BSIM3csub + here->BSIM3gbds * vds - here->BSIM3gbgs * vgd - here->BSIM3gbbs * vbd); ceqbd = 0.0; gbbsp = -here->BSIM3gbds; gbbdp = (here->BSIM3gbds + here->BSIM3gbgs + here->BSIM3gbbs); gbdpg = 0.0; gbdpsp = 0.0; gbdpb = 0.0; gbdpdp = 0.0; gbspg = here->BSIM3gbgs; gbspsp = here->BSIM3gbds; gbspb = here->BSIM3gbbs; gbspdp = -(gbspg + gbspsp + gbspb); } if (model->BSIM3type > 0) { ceqbs += (here->BSIM3cbs - here->BSIM3gbs * vbs); ceqbd += (here->BSIM3cbd - here->BSIM3gbd * vbd); /* ceqqg = ceqqg; ceqqb = ceqqb; ceqqd = ceqqd; cqdef = cqdef; cqcheq = cqcheq; */ } else { ceqbs -= (here->BSIM3cbs - here->BSIM3gbs * vbs); ceqbd -= (here->BSIM3cbd - here->BSIM3gbd * vbd); ceqqg = -ceqqg; ceqqb = -ceqqb; ceqqd = -ceqqd; cqdef = -cqdef; cqcheq = -cqcheq; } m = here->BSIM3m; #ifdef USE_OMP here->BSIM3rhsG = m * ceqqg; here->BSIM3rhsB = m * (ceqbs + ceqbd + ceqqb); here->BSIM3rhsD = m * (ceqbd - cdreq - ceqqd); here->BSIM3rhsS = m * (cdreq + ceqbs + ceqqg + ceqqb + ceqqd); if (here->BSIM3nqsMod) here->BSIM3rhsQ = m * (cqcheq - cqdef); #else (*(ckt->CKTrhs + here->BSIM3gNode) -= m * ceqqg); (*(ckt->CKTrhs + here->BSIM3bNode) -= m * (ceqbs + ceqbd + ceqqb)); (*(ckt->CKTrhs + here->BSIM3dNodePrime) += m * (ceqbd - cdreq - ceqqd)); (*(ckt->CKTrhs + here->BSIM3sNodePrime) += m * (cdreq + ceqbs + ceqqg + ceqqb + ceqqd)); if (here->BSIM3nqsMod) *(ckt->CKTrhs + here->BSIM3qNode) += m * (cqcheq - cqdef); #endif /* * load y matrix */ T1 = qdef * here->BSIM3gtau; #ifdef USE_OMP here->BSIM3DdPt = m * here->BSIM3drainConductance; here->BSIM3GgPt = m * (gcggb - ggtg); here->BSIM3SsPt = m * 
here->BSIM3sourceConductance; here->BSIM3BbPt = m * (here->BSIM3gbd + here->BSIM3gbs - gcbgb - gcbdb - gcbsb - here->BSIM3gbbs); here->BSIM3DPdpPt = m * (here->BSIM3drainConductance + here->BSIM3gds + here->BSIM3gbd + RevSum + gcddb + dxpart * ggtd + T1 * ddxpart_dVd + gbdpdp); here->BSIM3SPspPt = m * (here->BSIM3sourceConductance + here->BSIM3gds + here->BSIM3gbs + FwdSum + gcssb + sxpart * ggts + T1 * dsxpart_dVs + gbspsp); here->BSIM3DdpPt = m * here->BSIM3drainConductance; here->BSIM3GbPt = m * (gcggb + gcgdb + gcgsb + ggtb); here->BSIM3GdpPt = m * (gcgdb - ggtd); here->BSIM3GspPt = m * (gcgsb - ggts); here->BSIM3SspPt = m * here->BSIM3sourceConductance; here->BSIM3BgPt = m * (gcbgb - here->BSIM3gbgs); here->BSIM3BdpPt = m * (gcbdb - here->BSIM3gbd + gbbdp); here->BSIM3BspPt = m * (gcbsb - here->BSIM3gbs + gbbsp); here->BSIM3DPdPt = m * here->BSIM3drainConductance; here->BSIM3DPgPt = m * (Gm + gcdgb + dxpart * ggtg + T1 * ddxpart_dVg + gbdpg); here->BSIM3DPbPt = m * (here->BSIM3gbd - Gmbs + gcdgb + gcddb + gcdsb - dxpart * ggtb - T1 * ddxpart_dVb - gbdpb); here->BSIM3DPspPt = m * (here->BSIM3gds + FwdSum - gcdsb - dxpart * ggts - T1 * ddxpart_dVs - gbdpsp); here->BSIM3SPgPt = m * (gcsgb - Gm + sxpart * ggtg + T1 * dsxpart_dVg + gbspg); here->BSIM3SPsPt = m * here->BSIM3sourceConductance; here->BSIM3SPbPt = m * (here->BSIM3gbs + Gmbs + gcsgb + gcsdb + gcssb - sxpart * ggtb - T1 * dsxpart_dVb - gbspb); here->BSIM3SPdpPt = m * (here->BSIM3gds + RevSum - gcsdb - sxpart * ggtd - T1 * dsxpart_dVd - gbspdp); if (here->BSIM3nqsMod) { here->BSIM3QqPt = m * (gqdef + here->BSIM3gtau); here->BSIM3DPqPt = m * (dxpart * here->BSIM3gtau); here->BSIM3SPqPt = m * (sxpart * here->BSIM3gtau); here->BSIM3GqPt = m * here->BSIM3gtau; here->BSIM3QgPt = m * (ggtg - gcqgb); here->BSIM3QdpPt = m * (ggtd - gcqdb); here->BSIM3QspPt = m * (ggts - gcqsb); here->BSIM3QbPt = m * (ggtb - gcqbb); } #else (*(here->BSIM3DdPtr) += m * here->BSIM3drainConductance); (*(here->BSIM3GgPtr) += m * 
(gcggb - ggtg)); (*(here->BSIM3SsPtr) += m * here->BSIM3sourceConductance); (*(here->BSIM3BbPtr) += m * (here->BSIM3gbd + here->BSIM3gbs - gcbgb - gcbdb - gcbsb - here->BSIM3gbbs)); (*(here->BSIM3DPdpPtr) += m * (here->BSIM3drainConductance + here->BSIM3gds + here->BSIM3gbd + RevSum + gcddb + dxpart * ggtd + T1 * ddxpart_dVd + gbdpdp)); (*(here->BSIM3SPspPtr) += m * (here->BSIM3sourceConductance + here->BSIM3gds + here->BSIM3gbs + FwdSum + gcssb + sxpart * ggts + T1 * dsxpart_dVs + gbspsp)); (*(here->BSIM3DdpPtr) -= m * here->BSIM3drainConductance); (*(here->BSIM3GbPtr) -= m * (gcggb + gcgdb + gcgsb + ggtb)); (*(here->BSIM3GdpPtr) += m * (gcgdb - ggtd)); (*(here->BSIM3GspPtr) += m * (gcgsb - ggts)); (*(here->BSIM3SspPtr) -= m * here->BSIM3sourceConductance); (*(here->BSIM3BgPtr) += m * (gcbgb - here->BSIM3gbgs)); (*(here->BSIM3BdpPtr) += m * (gcbdb - here->BSIM3gbd + gbbdp)); (*(here->BSIM3BspPtr) += m * (gcbsb - here->BSIM3gbs + gbbsp)); (*(here->BSIM3DPdPtr) -= m * here->BSIM3drainConductance); (*(here->BSIM3DPgPtr) += m * (Gm + gcdgb + dxpart * ggtg + T1 * ddxpart_dVg + gbdpg)); (*(here->BSIM3DPbPtr) -= m * (here->BSIM3gbd - Gmbs + gcdgb + gcddb + gcdsb - dxpart * ggtb - T1 * ddxpart_dVb - gbdpb)); (*(here->BSIM3DPspPtr) -= m * (here->BSIM3gds + FwdSum - gcdsb - dxpart * ggts - T1 * ddxpart_dVs - gbdpsp)); (*(here->BSIM3SPgPtr) += m * (gcsgb - Gm + sxpart * ggtg + T1 * dsxpart_dVg + gbspg)); (*(here->BSIM3SPsPtr) -= m * here->BSIM3sourceConductance); (*(here->BSIM3SPbPtr) -= m * (here->BSIM3gbs + Gmbs + gcsgb + gcsdb + gcssb - sxpart * ggtb - T1 * dsxpart_dVb - gbspb)); (*(here->BSIM3SPdpPtr) -= m * (here->BSIM3gds + RevSum - gcsdb - sxpart * ggtd - T1 * dsxpart_dVd - gbspdp)); if (here->BSIM3nqsMod) { *(here->BSIM3QqPtr) += m * (gqdef + here->BSIM3gtau); *(here->BSIM3DPqPtr) += m * (dxpart * here->BSIM3gtau); *(here->BSIM3SPqPtr) += m * (sxpart * here->BSIM3gtau); *(here->BSIM3GqPtr) -= m * here->BSIM3gtau; *(here->BSIM3QgPtr) += m * (ggtg - gcqgb); 
*(here->BSIM3QdpPtr) += m * (ggtd - gcqdb); *(here->BSIM3QspPtr) += m * (ggts - gcqsb); *(here->BSIM3QbPtr) += m * (ggtb - gcqbb); } #endif line1000: ; #ifndef USE_OMP } /* End of Mosfet Instance */ } /* End of Model Instance */ #endif return(OK); } #ifdef USE_OMP void BSIM3LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt) { int InstCount, idx; BSIM3instance **InstArray; BSIM3instance *here; BSIM3model *model = (BSIM3model*)inModel; InstArray = model->BSIM3InstanceArray; InstCount = model->BSIM3InstCount; for(idx = 0; idx < InstCount; idx++) { here = InstArray[idx]; model = BSIM3modPtr(here); /* Update b for Ax = b */ (*(ckt->CKTrhs + here->BSIM3gNode) -= here->BSIM3rhsG); (*(ckt->CKTrhs + here->BSIM3bNode) -= here->BSIM3rhsB); (*(ckt->CKTrhs + here->BSIM3dNodePrime) += here->BSIM3rhsD); (*(ckt->CKTrhs + here->BSIM3sNodePrime) += here->BSIM3rhsS); if (here->BSIM3nqsMod) (*(ckt->CKTrhs + here->BSIM3qNode) += here->BSIM3rhsQ); /* Update A for Ax = b */ (*(here->BSIM3DdPtr) += here->BSIM3DdPt); (*(here->BSIM3GgPtr) += here->BSIM3GgPt); (*(here->BSIM3SsPtr) += here->BSIM3SsPt); (*(here->BSIM3BbPtr) += here->BSIM3BbPt); (*(here->BSIM3DPdpPtr) += here->BSIM3DPdpPt); (*(here->BSIM3SPspPtr) += here->BSIM3SPspPt); (*(here->BSIM3DdpPtr) -= here->BSIM3DdpPt); (*(here->BSIM3GbPtr) -= here->BSIM3GbPt); (*(here->BSIM3GdpPtr) += here->BSIM3GdpPt); (*(here->BSIM3GspPtr) += here->BSIM3GspPt); (*(here->BSIM3SspPtr) -= here->BSIM3SspPt); (*(here->BSIM3BgPtr) += here->BSIM3BgPt); (*(here->BSIM3BdpPtr) += here->BSIM3BdpPt); (*(here->BSIM3BspPtr) += here->BSIM3BspPt); (*(here->BSIM3DPdPtr) -= here->BSIM3DPdPt); (*(here->BSIM3DPgPtr) += here->BSIM3DPgPt); (*(here->BSIM3DPbPtr) -= here->BSIM3DPbPt); (*(here->BSIM3DPspPtr) -= here->BSIM3DPspPt); (*(here->BSIM3SPgPtr) += here->BSIM3SPgPt); (*(here->BSIM3SPsPtr) -= here->BSIM3SPsPt); (*(here->BSIM3SPbPtr) -= here->BSIM3SPbPt); (*(here->BSIM3SPdpPtr) -= here->BSIM3SPdpPt); if (here->BSIM3nqsMod) { *(here->BSIM3QqPtr) += here->BSIM3QqPt; 
*(here->BSIM3DPqPtr) += here->BSIM3DPqPt; *(here->BSIM3SPqPtr) += here->BSIM3SPqPt; *(here->BSIM3GqPtr) -= here->BSIM3GqPt; *(here->BSIM3QgPtr) += here->BSIM3QgPt; *(here->BSIM3QdpPtr) += here->BSIM3QdpPt; *(here->BSIM3QspPtr) += here->BSIM3QspPt; *(here->BSIM3QbPtr) += here->BSIM3QbPt; } } } #endif
/* ==================== simplehash.c ==================== */
/* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. 
In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ #include <stdio.h> #include <stdlib.h> #include "simplehash.h" #ifdef HAVE_OPENCL #include "simplehashlib_kern.inc" #include "simplehash_kern.inc" #endif static ulong AA; static ulong BB; static ulong prime=4294967291; static uint hashtablesize; static uint hash_stride; static uint hash_ncells; static uint write_hash_collisions; static uint read_hash_collisions; static double write_hash_collisions_runsum = 0.0; static double read_hash_collisions_runsum = 0.0; static uint write_hash_collisions_count = 0; static uint read_hash_collisions_count = 0; static uint hash_report_level = 0; static uint hash_queries; static uint hash_method = QUADRATIC; static uint hash_jump_prime = 41; static double hash_mult = 3.0; static int do_compact_hash = 0; float mem_opt_factor; int (*read_hash)(ulong, int *); void (*write_hash)(uint, ulong, int *); #ifdef _OPENMP #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void (*write_hash_openmp)(uint, ulong, int *); #else void (*write_hash_openmp)(uint, ulong, int *, omp_lock_t * lock); #endif #endif int get_hash_method(void) { return(hash_method); } long long get_hashtablesize(void) { return(hashtablesize); } int *compact_hash_init(int ncells, uint isize, uint jsize, int do_init, uint report_level){ hash_ncells = 0; write_hash_collisions = 0; read_hash_collisions = 0; hash_queries = 0; hash_report_level = report_level; hash_stride = isize; int *hash = NULL; uint compact_hash_size = (uint)((double)ncells*hash_mult); compact_hash_size = ncells; uint perfect_hash_size = (uint)(isize*jsize); float hash_mem_factor = 20.0; float hash_mem_ratio = (double)perfect_hash_size/(double)compact_hash_size; if (mem_opt_factor != 1.0) hash_mem_factor /= (mem_opt_factor*0.2); do_compact_hash = (hash_mem_ratio < hash_mem_factor) ? 
0 : 1; do_compact_hash = 1; if (hash_report_level >= 2) printf("DEBUG do_compact_hash %d hash_mem_ratio %f hash_mem_factor %f mem_opt_factor %f perfect_hash_size %u compact_hash_size %u\n",do_compact_hash,hash_mem_ratio,hash_mem_factor,mem_opt_factor,perfect_hash_size,compact_hash_size); if (do_compact_hash) { hashtablesize = compact_hash_size; AA = (ulong)(1.0+(double)(prime-1)*drand48()); BB = (ulong)(0.0+(double)(prime-1)*drand48()); if (AA > prime-1 || BB > prime-1) exit(0); if (hash_report_level > 1) printf("Factors AA %lu BB %lu\n",AA,BB); hash = (int *)malloc(2*hashtablesize*sizeof(int)); for (uint ii = 0; ii<2*hashtablesize; ii+=2){ hash[ii] = -1; } if (hash_method == LINEAR){ if (hash_report_level == 0){ read_hash = read_hash_linear; write_hash = write_hash_linear; } else if (hash_report_level == 1){ read_hash = read_hash_linear_report_level_1; write_hash = write_hash_linear_report_level_1; } else if (hash_report_level == 2){ read_hash = read_hash_linear_report_level_2; write_hash = write_hash_linear_report_level_2; } else if (hash_report_level == 3){ read_hash = read_hash_linear_report_level_3; write_hash = write_hash_linear_report_level_3; } } else if (hash_method == QUADRATIC) { if (hash_report_level == 0){ read_hash = read_hash_quadratic; write_hash = write_hash_quadratic; } else if (hash_report_level == 1){ read_hash = read_hash_quadratic_report_level_1; write_hash = write_hash_quadratic_report_level_1; } else if (hash_report_level == 2){ read_hash = read_hash_quadratic_report_level_2; write_hash = write_hash_quadratic_report_level_2; } else if (hash_report_level == 3){ read_hash = read_hash_quadratic_report_level_3; write_hash = write_hash_quadratic_report_level_3; } } else if (hash_method == PRIME_JUMP) { if (hash_report_level == 0){ read_hash = read_hash_primejump; write_hash = write_hash_primejump; } else if (hash_report_level == 1){ read_hash = read_hash_primejump_report_level_1; write_hash = write_hash_primejump_report_level_1; } else if 
(hash_report_level == 2){ read_hash = read_hash_primejump_report_level_2; write_hash = write_hash_primejump_report_level_2; } else if (hash_report_level == 3){ read_hash = read_hash_primejump_report_level_3; write_hash = write_hash_primejump_report_level_3; } } } else { hashtablesize = perfect_hash_size; hash = (int *)malloc(hashtablesize*sizeof(int)); if (do_init) { for (uint ii = 0; ii<hashtablesize; ii++){ hash[ii] = -1; } } read_hash = read_hash_perfect; write_hash = write_hash_perfect; } if (hash_report_level >= 2) { printf("Hash table size %u perfect hash table size %u memory savings %u by percentage %lf\n", hashtablesize,isize*jsize,isize*jsize-hashtablesize, (double)hashtablesize/(double)(isize*jsize)); } return(hash); } #ifdef _OPENMP #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int *compact_hash_init_openmp(int ncells, uint isize, uint jsize, int do_init, uint report_level){ #else int *compact_hash_init_openmp(int ncells, uint isize, uint jsize, int do_init, uint report_level, omp_lock_t **lock){ #endif hash_ncells = 0; write_hash_collisions = 0; read_hash_collisions = 0; hash_queries = 0; hash_report_level = report_level; hash_stride = isize; int *hash = NULL; if (choose_hash_method != METHOD_UNSET) hash_method = choose_hash_method; uint compact_hash_size = (uint)((double)ncells*hash_mult); uint perfect_hash_size = (uint)(isize*jsize); if (hash_method == METHOD_UNSET){ float hash_mem_factor = 20.0; float hash_mem_ratio = (double)perfect_hash_size/(double)compact_hash_size; if (mem_opt_factor != 1.0) hash_mem_factor /= (mem_opt_factor*0.2); hash_method = (hash_mem_ratio < hash_mem_factor) ? PERFECT_HASH : QUADRATIC; //hash_method = QUADRATIC; if (hash_report_level >= 2) printf("DEBUG hash_method %d hash_mem_ratio %f hash_mem_factor %f mem_opt_factor %f perfect_hash_size %u compact_hash_size %u\n", hash_method,hash_mem_ratio,hash_mem_factor,mem_opt_factor,perfect_hash_size,compact_hash_size); } int do_compact_hash = (hash_method == PERFECT_HASH) ? 
0 : 1; if (hash_report_level >= 2) printf("DEBUG do_compact_hash %d hash_method %d perfect_hash_size %u compact_hash_size %u\n", do_compact_hash,hash_method,perfect_hash_size,compact_hash_size); if (do_compact_hash) { hashtablesize = compact_hash_size; //srand48(0); AA = (ulong)(1.0+(double)(prime-1)*drand48()); BB = (ulong)(0.0+(double)(prime-1)*drand48()); if (AA > prime-1 || BB > prime-1) exit(0); if (hash_report_level > 1) printf("Factors AA %lu BB %lu\n",AA,BB); hash = (int *)genvector(2*hashtablesize,sizeof(int)); #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 (*lock) = (omp_lock_t *)malloc(hashtablesize*sizeof(omp_lock_t)); #endif #ifdef _OPENMP #pragma omp parallel for #endif for (uint ii = 0; ii<hashtablesize; ii++){ hash[2*ii] = -1; #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 omp_init_lock(&((*lock)[ii])); #endif } if (hash_method == LINEAR){ if (hash_report_level == 0){ read_hash = read_hash_linear; write_hash_openmp = write_hash_linear_openmp; } else if (hash_report_level == 1){ read_hash = read_hash_linear_report_level_1; write_hash_openmp = write_hash_linear_openmp_report_level_1; } else if (hash_report_level == 2){ read_hash = read_hash_linear_report_level_2; write_hash_openmp = write_hash_linear_openmp_report_level_2; } else if (hash_report_level == 3){ read_hash = read_hash_linear_report_level_3; write_hash_openmp = write_hash_linear_openmp_report_level_3; } } else if (hash_method == QUADRATIC) { if (hash_report_level == 0){ read_hash = read_hash_quadratic; write_hash_openmp = write_hash_quadratic_openmp; } else if (hash_report_level == 1){ read_hash = read_hash_quadratic_report_level_1; write_hash_openmp = write_hash_quadratic_openmp_report_level_1; } else if (hash_report_level == 2){ read_hash = read_hash_quadratic_report_level_2; write_hash_openmp = write_hash_quadratic_openmp_report_level_2; } else if (hash_report_level == 3){ read_hash = read_hash_quadratic_report_level_3; write_hash_openmp = write_hash_quadratic_openmp_report_level_3; } } else if 
(hash_method == PRIME_JUMP) { if (hash_report_level == 0){ read_hash = read_hash_primejump; write_hash_openmp = write_hash_primejump_openmp; } else if (hash_report_level == 1){ read_hash = read_hash_primejump_report_level_1; write_hash_openmp = write_hash_primejump_openmp_report_level_1; } else if (hash_report_level == 2){ read_hash = read_hash_primejump_report_level_2; write_hash_openmp = write_hash_primejump_openmp_report_level_2; } else if (hash_report_level == 3){ read_hash = read_hash_primejump_report_level_3; write_hash_openmp = write_hash_primejump_openmp_report_level_3; } } } else { hashtablesize = perfect_hash_size; hash = (int *)genvector(hashtablesize,sizeof(int)); #ifdef _OPENMP #pragma omp parallel for #endif if (do_init) { for (uint ii = 0; ii<hashtablesize; ii++){ hash[ii] = -1; } } read_hash = read_hash_perfect; write_hash_openmp = write_hash_perfect_openmp; } if (hash_report_level >= 2) { printf("Hash table size %u perfect hash table size %u memory savings %d by percentage %lf\n", hashtablesize,isize*jsize,(int)isize*(int)jsize-(int)hashtablesize, (double)hashtablesize/(double)(isize*jsize) * 100.0); } return(hash); } #endif void write_hash_perfect(uint ic, ulong hashkey, int *hash){ hash[hashkey] = ic; } void write_hash_linear(uint ic, ulong hashkey, int *hash){ uint hashloc; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc++,hashloc = hashloc%hashtablesize); hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_linear_report_level_1(uint ic, ulong hashkey, int *hash){ uint hashloc; hash_ncells++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc++,hashloc = hashloc%hashtablesize){ write_hash_collisions++; } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_linear_report_level_2(uint ic, ulong hashkey, int *hash){ uint hashloc; hash_ncells++; for (hashloc = 
(hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc++,hashloc = hashloc%hashtablesize){ write_hash_collisions++; } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_linear_report_level_3(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; hash_ncells++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc++,hashloc = hashloc%hashtablesize){ int hashloctmp = hashloc+1; hashloctmp = hashloctmp%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); icount++; } write_hash_collisions += icount; hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_quadratic(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize) { icount++; } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_quadratic_report_level_1(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; hash_ncells++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; } write_hash_collisions += icount; hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_quadratic_report_level_2(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; hash_ncells++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; 
hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; } write_hash_collisions += icount; hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_quadratic_report_level_3(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; hash_ncells++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; int hashloctmp = hashloc+icount*icount; hashloctmp = hashloctmp%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); } write_hash_collisions += icount; hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_primejump(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; uint jump = 1+hashkey%hash_jump_prime; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize) { icount++; } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_primejump_report_level_1(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; uint jump = 1+hashkey%hash_jump_prime; hash_ncells++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; } write_hash_collisions += icount; hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_primejump_report_level_2(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; uint jump = 1+hashkey%hash_jump_prime; hash_ncells++; for (hashloc = 
(hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; } write_hash_collisions += icount; hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } void write_hash_primejump_report_level_3(uint ic, ulong hashkey, int *hash){ int icount = 0; uint hashloc; uint jump = 1+hashkey%hash_jump_prime; hash_ncells++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; int hashloctmp = hashloc+1; hashloctmp = hashloctmp%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); } write_hash_collisions += icount; hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; } #ifdef _OPENMP #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_perfect_openmp(uint ic, ulong hashkey, int *hash){ #else void write_hash_perfect_openmp(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif hash[hashkey] = ic; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_linear_openmp(uint ic, ulong hashkey, int *hash){ #else void write_hash_linear_openmp(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;; #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc++; hashloc %= hashtablesize; old_key = __sync_val_compare_and_swap(&hash[2*hashloc], 
-1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); hashloc++; hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_linear_openmp_report_level_1(uint ic, ulong hashkey, int *hash){ #else void write_hash_linear_openmp_report_level_1(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;; #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc++; hashloc %= hashtablesize; old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); icount++; } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); hashloc++; hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); icount++; } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_linear_openmp_report_level_2(uint ic, ulong hashkey, int *hash){ #else void write_hash_linear_openmp_report_level_2(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;; #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 
1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc++; hashloc %= hashtablesize; old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); icount++; } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); hashloc++; hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); icount++; } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_linear_openmp_report_level_3(uint ic, ulong hashkey, int *hash){ #else void write_hash_linear_openmp_report_level_3(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc++; hashloc %= hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); icount++; } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); hashloc++; hashloc = hashloc%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj 
%lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); omp_set_lock(&(lock[hashloc])); icount++; } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_quadratic_openmp(uint ic, ulong hashkey, int *hash){ #else void write_hash_quadratic_openmp(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize; #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc+=(icount*icount); hashloc %= hashtablesize; old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); icount++; hashloc+=(icount*icount); hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_quadratic_openmp_report_level_1(uint ic, ulong hashkey, int *hash){ #else void write_hash_quadratic_openmp_report_level_1(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize; #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc+=(icount*icount); hashloc %= hashtablesize; old_key = 
__sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); icount++; hashloc+=(icount*icount); hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_quadratic_openmp_report_level_2(uint ic, ulong hashkey, int *hash){ #else void write_hash_quadratic_openmp_report_level_2(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize; #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc+=(icount*icount); hashloc %= hashtablesize; old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); icount++; hashloc+=(icount*icount); hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_quadratic_openmp_report_level_3(uint ic, ulong hashkey, int *hash){ #else void write_hash_quadratic_openmp_report_level_3(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize; 
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc+=(icount*icount); hashloc %= hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); icount++; hashloc+=(icount*icount); hashloc = hashloc%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_primejump_openmp(uint ic, ulong hashkey, int *hash){ #else void write_hash_primejump_openmp(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint jump = 1+hashkey%hash_jump_prime; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize; #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc+=(icount*jump); hashloc %= hashtablesize; old_key = 
__sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); icount++; hashloc+=(icount*jump); hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_primejump_openmp_report_level_1(uint ic, ulong hashkey, int *hash){ #else void write_hash_primejump_openmp_report_level_1(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint jump = 1+hashkey%hash_jump_prime; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize; #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc+=(icount*jump); hashloc %= hashtablesize; old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); icount++; hashloc+=(icount*jump); hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_primejump_openmp_report_level_2(uint ic, ulong hashkey, int *hash){ #else void write_hash_primejump_openmp_report_level_2(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint jump = 1+hashkey%hash_jump_prime; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize; #ifdef 
__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc+=(icount*jump); hashloc %= hashtablesize; old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); icount++; hashloc+=(icount*jump); hashloc = hashloc%hashtablesize; omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void write_hash_primejump_openmp_report_level_3(uint ic, ulong hashkey, int *hash){ #else void write_hash_primejump_openmp_report_level_3(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){ #endif int icount = 0; uint jump = 1+hashkey%hash_jump_prime; uint hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 int MaxTries = 1000; int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); //printf("old_key is %d\n",old_key); for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){ hashloc+=(icount*jump); hashloc %= hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey); } if (icount < MaxTries) hash[2*hashloc+1] = ic; #else omp_set_lock(&(lock[hashloc])); while 
(hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){ omp_unset_lock(&(lock[hashloc])); icount++; hashloc+=(icount*jump); hashloc = hashloc%hashtablesize; printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); omp_set_lock(&(lock[hashloc])); } hash[2*hashloc] = hashkey; hash[2*hashloc+1] = ic; omp_unset_lock(&(lock[hashloc])); #endif #pragma omp atomic write_hash_collisions += icount;; #pragma omp atomic hash_ncells++; } #endif int read_hash_perfect(ulong hashkey, int *hash){ return(hash[hashkey]); } int read_hash_linear(ulong hashkey, int *hash){ int hashval = -1; uint hashloc; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize); if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_linear_report_level_1(ulong hashkey, int *hash){ int hashval = -1; uint hashloc; int icount=0; hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){ icount++; } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_linear_report_level_2(ulong hashkey, int *hash){ int max_collisions_allowed = 1000; int hashval = -1; uint hashloc; int icount=0; hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){ icount++; if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_linear_report_level_3(ulong hashkey, int *hash){ int max_collisions_allowed = 1000; int hashval = -1; uint hashloc; int 
icount=0; hash_queries++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){ icount++; uint hashloctmp = hashloc+1; hashloctmp = hashloctmp%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_quadratic(ulong hashkey, int *hash){ int hashval = -1; uint hashloc; int icount=0; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; } if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_quadratic_report_level_1(ulong hashkey, int *hash){ int hashval = -1; uint hashloc; int icount=0; hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_quadratic_report_level_2(ulong hashkey, int *hash){ int max_collisions_allowed = 1000; int hashval = -1; uint hashloc; int icount=0; hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; if (icount > max_collisions_allowed) { printf("Error 
-- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_quadratic_report_level_3(ulong hashkey, int *hash){ int max_collisions_allowed = 1000; int hashval = -1; uint hashloc; int icount=0; hash_queries++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; uint hashloctmp = hashloc+1; hashloctmp = hashloctmp%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_primejump(ulong hashkey, int *hash){ int hashval = -1; uint hashloc; int icount=0; uint jump = 1+hashkey%hash_jump_prime; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; } if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_primejump_report_level_1(ulong hashkey, int *hash){ int hashval = -1; uint hashloc; int icount=0; uint jump = 1+hashkey%hash_jump_prime; hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int 
read_hash_primejump_report_level_2(ulong hashkey, int *hash){ int max_collisions_allowed = 1000; int hashval = -1; uint hashloc; int icount=0; uint jump = 1+hashkey%hash_jump_prime; hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } int read_hash_primejump_report_level_3(ulong hashkey, int *hash){ int max_collisions_allowed = 1000; int hashval = -1; uint hashloc; int icount=0; uint jump = 1+hashkey%hash_jump_prime; hash_queries++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; uint hashloctmp = hashloc+1; hashloctmp = hashloctmp%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); } void compact_hash_delete(int *hash){ read_hash = NULL; free(hash); } #ifdef _OPENMP #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 void compact_hash_delete_openmp(int *hash){ #else void compact_hash_delete_openmp(int *hash, omp_lock_t *lock){ #endif read_hash = NULL; genvectorfree((void *)hash); #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 if (hash_method != PERFECT_HASH){ for (uint i = 0; 
i<hashtablesize; i++){ omp_destroy_lock(&(lock[i])); } free(lock); } #endif hash_method = METHOD_UNSET; } #endif void write_hash_collision_report(void){ if (hash_method == PERFECT_HASH) return; if (hash_report_level == 1) { write_hash_collisions_runsum += (double)write_hash_collisions/(double)hash_ncells; write_hash_collisions_count++; } else if (hash_report_level >= 2) { printf("Write hash collision report -- collisions per cell %lf, collisions %d cells %d\n",(double)write_hash_collisions/(double)hash_ncells,write_hash_collisions,hash_ncells); } } void read_hash_collision_report(void){ if (hash_method == PERFECT_HASH) return; if (hash_report_level == 1) { read_hash_collisions_runsum += (double)read_hash_collisions/(double)hash_queries; read_hash_collisions_count++; } else if (hash_report_level >= 2) { printf("Read hash collision report -- collisions per cell %lf, collisions %d cells %d\n",(double)read_hash_collisions/(double)hash_queries,read_hash_collisions,hash_queries); hash_queries = 0; read_hash_collisions = 0; } } void final_hash_collision_report(void){ if (hash_report_level >= 1 && read_hash_collisions_count > 0) { printf("Final hash collision report -- write/read collisions per cell %lf/%lf\n",write_hash_collisions_runsum/(double)write_hash_collisions_count,read_hash_collisions_runsum/(double)read_hash_collisions_count); } } #ifdef HAVE_OPENCL char *get_hash_kernel_source_string(void){ return (char*) simplehash_kern_source; } cl_kernel init_kernel; void hash_lib_init(cl_context context){ cl_int error; cl_program program = clCreateProgramWithSource(context, 1, (const char **)&simplehashlib_kern_source, NULL, &error); if (error != CL_SUCCESS){ printf("clCreateProgramWithSource returned an error %d at line %d in file %s\n", error,__LINE__,__FILE__); } size_t nReportSize; char* BuildReport; #ifdef HAVE_CL_DOUBLE error = clBuildProgram(program, 0, NULL, "-DHAVE_CL_DOUBLE", NULL, NULL); #else error = clBuildProgram(program, 0, NULL, "-DNO_CL_DOUBLE 
-cl-single-precision-constant", NULL, NULL); #endif if (error != CL_SUCCESS){ printf("clBuildProgram returned an error %d at line %d in file %s\n", error,__LINE__,__FILE__); cl_device_id device; clGetContextInfo(context, CL_CONTEXT_DEVICES, 1, &device, NULL); if (context == NULL){ printf("EZCL_DEVTYPE_INIT: Failed to find device and setup context in file %s at line %d\n", __FILE__, __LINE__); exit(-1); // No device is available, something is wrong } error = clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &nReportSize); if (error != CL_SUCCESS) { switch (error){ case CL_INVALID_DEVICE: printf("Invalid device in clProgramBuildInfo\n"); break; case CL_INVALID_VALUE: printf("Invalid value in clProgramBuildInfo\n"); break; case CL_INVALID_PROGRAM: printf("Invalid program in clProgramBuildInfo\n"); break; } } BuildReport = (char *)malloc(nReportSize); error = clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, nReportSize, BuildReport, NULL); if (error != CL_SUCCESS) { switch (error){ case CL_INVALID_DEVICE: printf("Invalid device in clProgramBuildInfo\n"); break; case CL_INVALID_VALUE: printf("Invalid value in clProgramBuildInfo\n"); break; case CL_INVALID_PROGRAM: printf("Invalid program in clProgramBuildInfo\n"); break; } } printf("%s\n", BuildReport); } init_kernel = clCreateKernel(program, "init_kern", &error); if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__); } cl_mem hash_init (int hash_size, int TILE_SIZE, int do_init, cl_context context, cl_command_queue queue, long *gpu_time) { cl_int error; long gpu_time_start, gpu_time_end; cl_mem hash_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, hash_size*sizeof(int), NULL, &error); if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__); //init to -1 if (do_init) { error = clSetKernelArg(init_kernel, 0, sizeof(cl_uint), &hash_size); if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__); error = clSetKernelArg(init_kernel, 
1, sizeof(cl_mem), (void*)&hash_buffer); if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__); size_t global_work_size; size_t local_work_size; local_work_size = TILE_SIZE; global_work_size = ((hash_size+local_work_size-1)/local_work_size)*local_work_size; cl_event hash_init_event; error = clEnqueueNDRangeKernel(queue, init_kernel, 1, 0, &global_work_size, &local_work_size, 0, NULL, &hash_init_event); if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__); clWaitForEvents(1,&hash_init_event); clGetEventProfilingInfo(hash_init_event, CL_PROFILING_COMMAND_START, sizeof(gpu_time_start), &gpu_time_start, NULL); clGetEventProfilingInfo(hash_init_event, CL_PROFILING_COMMAND_END, sizeof(gpu_time_end), &gpu_time_end, NULL); *gpu_time = gpu_time_end - gpu_time_start; clReleaseEvent(hash_init_event); } //if (DETAILED_TIMING) printf("\n\tinit %.6lf,", (double)(gpu_time_end - gpu_time_start)*1.0e-9); return(hash_buffer); } #endif int read_dev_hash(int hash_method, ulong hashtablesize, ulong AA, ulong BB, ulong hashkey, int *hash){ //int hash_report_level = 3; int max_collisions_allowed = 1000; int hashval = -1; uint hashloc; int icount=0; if (hash_method == PERFECT_HASH) { return(hash[hashkey]); } if (hash_method == LINEAR) { if (hash_report_level == 0) { for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){ icount++; } } else if (hash_report_level == 1) { hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){ icount++; } read_hash_collisions += icount; } else if (hash_report_level == 2) { hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){ icount++; if (icount > max_collisions_allowed) { printf("Error -- too 
many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; } else if (hash_report_level == 3) { hash_queries++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){ icount++; uint hashloctmp = hashloc+1; hashloctmp = hashloctmp%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; } else { printf("Error -- Illegal value of hash_report_level %d\n",hash_report_level); exit(1); } } else if (hash_method == QUADRATIC) { if (hash_report_level == 0) { for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; } } else if (hash_report_level == 1) { hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; } read_hash_collisions += icount; } else if (hash_report_level == 2) { hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; } else if (hash_report_level == 3) { hash_queries++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d 
hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){ icount++; uint hashloctmp = hashloc+1; hashloctmp = hashloctmp%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; } else { printf("Error -- Illegal value of hash_report_level %d\n",hash_report_level); exit(1); } } else if (hash_method == PRIME_JUMP) { uint jump = 1+hashkey%hash_jump_prime; if (hash_report_level == 0) { for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; } } else if (hash_report_level == 1) { hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; } read_hash_collisions += icount; } else if (hash_report_level == 2) { hash_queries++; for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; } else if (hash_report_level == 3) { hash_queries++; hashloc = (hashkey*AA+BB)%prime%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride); for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != 
(int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){ icount++; uint hashloctmp = hashloc+1; hashloctmp = hashloctmp%hashtablesize; printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride); if (icount > max_collisions_allowed) { printf("Error -- too many read hash collisions\n"); exit(0); } } read_hash_collisions += icount; } else { printf("Error -- Illegal value of hash_report_level %d\n",hash_report_level); exit(1); } } else { printf("Error -- Illegal value of hash_method %d\n",hash_method); exit(1); } if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1]; return(hashval); }
zgelqs.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_gelqs
 *
 * Computes a minimum-norm solution min | A*X - B | using the
 * LQ factorization A = L*Q computed by plasma_zgelqf.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= m >= 0.
 *
 * @param[in] nrhs
 *          The number of columns of B. nrhs >= 0.
 *
 * @param[in] pA
 *          Details of the LQ factorization of the original matrix A as returned
 *          by plasma_zgelqf.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= m.
 *
 * @param[in] T
 *          Auxiliary factorization data, computed by plasma_zgelqf.
 *
 * @param[in,out] pB
 *          On entry, pointer to the m-by-nrhs right hand side matrix B.
 *          On exit, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= n.
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zgelqs
 * @sa plasma_cgelqs
 * @sa plasma_dgelqs
 * @sa plasma_sgelqs
 * @sa plasma_zgelqf
 *
 ******************************************************************************/
int plasma_zgelqs(int m, int n, int nrhs,
                  plasma_complex64_t *pA, int lda,
                  plasma_desc_t T,
                  plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0 || m > n) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    // B holds the n-by-nrhs solution X on exit, so ldb is checked against n.
    // (Simplified from the redundant imax(1, imax(1, n)).)
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (m == 0 || n == 0 || nrhs == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmlq: work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Fix: release the tile matrices created above; this error path
        // previously leaked both descriptors.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zgelqs(A, T, B, work, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_gelqs
 *
 * Computes a minimum-norm solution using previously computed LQ factorization.
 * Non-blocking tile version of plasma_zgelqs().
 * May return before the computation is finished.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *          A is stored in the tile layout.
 *
 * @param[in] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by plasma_zgelqf.
 *
 * @param[in,out] B
 *          Descriptor of matrix B.
 *          On entry, right-hand side matrix B in the tile layout.
 *          On exit, solution matrix X in the tile layout.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas kernels.
 *          For multiplication by Q contains preallocated space for work
 *          arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
* ******************************************************************************* * * @sa plasma_zgelqs * @sa plasma_omp_cgelqs * @sa plasma_omp_dgelqs * @sa plasma_omp_sgelqs * @sa plasma_omp_zgelqf * ******************************************************************************/ void plasma_omp_zgelqs(plasma_desc_t A, plasma_desc_t T, plasma_desc_t B, plasma_workspace_t work, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid descriptor A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(T) != PlasmaSuccess) { plasma_error("invalid descriptor T"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid descriptor B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.m == 0 || A.n == 0 || B.n == 0) return; // Zero the trailing block of the right-hand-side matrix. // B has less rows than X. plasma_pzlaset(PlasmaGeneral, 0.0, 0.0, plasma_desc_view(B, A.m, 0, A.n - A.m, B.n), sequence, request); // Solve L * Y = B. plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaNonUnit, 1.0, plasma_desc_view(A, 0, 0, A.m, A.m), plasma_desc_view(B, 0, 0, A.m, B.n), sequence, request); // Find X = Q^H * Y. 
if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzunmlq_tree(PlasmaLeft, Plasma_ConjTrans, A, T, B, work, sequence, request); } else { plasma_pzunmlq(PlasmaLeft, Plasma_ConjTrans, A, T, B, work, sequence, request); } }
par_lr_interp.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.24 $ ***********************************************************************EHEADER*/ #include "_hypre_parcsr_ls.h" #include "aux_interp.h" #define MAX_C_CONNECTIONS 100 #define HAVE_COMMON_C 1 /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildStdInterp * Comment: The interpolatory weighting can be changed with the sep_weight * variable. This can enable not separating negative and positive * off diagonals in the weight formula. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, double trunc_factor, HYPRE_Int max_elmts, HYPRE_Int sep_weight, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); double *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); double *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int 
num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; double *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; double *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; HYPRE_Int *col_map_offd_P = NULL; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; double *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *found = NULL; HYPRE_Int num_cols_P_offd; HYPRE_Int newoff, loc_col; HYPRE_Int A_ext_rows, full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; HYPRE_Int Soprows; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter, coarse_counter_offd; HYPRE_Int *ihat = NULL; HYPRE_Int *ihat_offd = NULL; HYPRE_Int *ipnt = NULL; HYPRE_Int *ipnt_offd = NULL; HYPRE_Int strong_f_marker = -2; /* 
Interpolation weight variables */ double *ahat = NULL; double *ahat_offd = NULL; double sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C; double diagonal, distribute; double alfa = 1.; double beta = 1.; /* Loop variables */ HYPRE_Int index; HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, j, j1, jj, kk, k1; HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx; /* Definitions */ double zero = 0.0; double one = 1.0; double wall_time; double wall_1 = 0; double wall_2 = 0; double wall_3 = 0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag== 4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ newoff = 0; full_off_procNodes = 0; if (num_procs > 1) { /*---------------------------------------------------------------------- * Get the off processors rows for A and S, associated with columns in * A_offd and S_offd. 
*---------------------------------------------------------------------*/ A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); A_ext_rows = hypre_CSRMatrixNumRows(A_ext); Sop = hypre_ParCSRMatrixExtractBExt(S,A,0); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); Soprows = hypre_CSRMatrixNumRows(Sop); /* Find nodes that are neighbors of neighbors, not found in offd */ newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j, Soprows, col_map_offd, col_1, col_n, Sop_i, Sop_j, CF_marker, comm_pkg); if(newoff >= 0) full_off_procNodes = newoff + num_cols_A_offd; else return hypre_error_flag; /* Possibly add new points and new processors to the comm_pkg, all * processors need new_comm_pkg */ /* AHB - create a new comm package just for extended info - this will work better with the assumed partition*/ hypre_ParCSRFindExtendCommPkg(A, newoff, found, &extend_comm_pkg); if (full_off_procNodes) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); if (num_functions > 1 && full_off_procNodes > 0) dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker, full_off_procNodes, CF_marker_offd); if(num_functions > 1) alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func, full_off_procNodes, dof_func_offd); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; coarse_counter_offd = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] >= 0) { if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] >= 0) { if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(double, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(double, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } /* Initialize ahat, which is a modification to a, used in the standard * interpolation routine. 
*/ if (n_fine) { ahat = hypre_CTAlloc(double, n_fine); ihat = hypre_CTAlloc(HYPRE_Int, n_fine); ipnt = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { ahat_offd = hypre_CTAlloc(double, full_off_procNodes); ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; ahat[i] = 0; ihat[i] = -1; } for (i = 0; i < full_off_procNodes; i++) { P_marker_offd[i] = -1; ahat_offd[i] = 0; ihat_offd[i] = -1; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if(num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = i1; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = k1; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd]=i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(CF_marker[loc_col] >= 0) { if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = loc_col; P_diag_data[jj_counter] = zero; jj_counter++; } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] >= 0) { if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; 
wall_1 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); cnt_c = 0; cnt_f = jj_end_row-jj_begin_row; cnt_c_offd = 0; cnt_f_offd = jj_end_row_offd-jj_begin_row_offd; ihat[i] = cnt_f; ipnt[cnt_f] = i; ahat[cnt_f++] = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is direct neighbor */ i1 = A_diag_j[jj]; if (P_marker[i1] != strong_f_marker) { indx = ihat[i1]; if (indx > -1) ahat[indx] += A_diag_data[jj]; else if (P_marker[i1] >= jj_begin_row) { ihat[i1] = cnt_c; ipnt[cnt_c] = i1; ahat[cnt_c++] += A_diag_data[jj]; } else if (CF_marker[i1] != -3) { ihat[i1] = cnt_f; ipnt[cnt_f] = i1; ahat[cnt_f++] += A_diag_data[jj]; } } else { if(num_functions == 1 || dof_func[i] == dof_func[i1]) { distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]]; for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++) { k1 = A_diag_j[kk]; indx = ihat[k1]; if (indx > -1) ahat[indx] -= A_diag_data[kk]*distribute; else if (P_marker[k1] >= jj_begin_row) { ihat[k1] = cnt_c; ipnt[cnt_c] = k1; ahat[cnt_c++] -= A_diag_data[kk]*distribute; } else { ihat[k1] = cnt_f; ipnt[cnt_f] = k1; ahat[cnt_f++] -= A_diag_data[kk]*distribute; } } if(num_procs > 1) { for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++) { k1 = A_offd_j[kk]; indx = ihat_offd[k1]; if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1]) { if (indx > -1) ahat_offd[indx] -= A_offd_data[kk]*distribute; else if (P_marker_offd[k1] >= jj_begin_row_offd) { ihat_offd[k1] = cnt_c_offd; ipnt_offd[cnt_c_offd] = k1; ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute; } else { ihat_offd[k1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = k1; ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute; } } } } } } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] != strong_f_marker) { indx = ihat_offd[i1]; if (indx > -1) ahat_offd[indx] += A_offd_data[jj]; else if (P_marker_offd[i1] >= jj_begin_row_offd) { ihat_offd[i1] = 
cnt_c_offd; ipnt_offd[cnt_c_offd] = i1; ahat_offd[cnt_c_offd++] += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { ihat_offd[i1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = i1; ahat_offd[cnt_f_offd++] += A_offd_data[jj]; } } else { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]]; for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++) { k1 = A_ext_j[kk]; if(k1 >= col_1 && k1 < col_n) { /*diag*/ loc_col = k1 - col_1; indx = ihat[loc_col]; if (indx > -1) ahat[indx] -= A_ext_data[kk]*distribute; else if (P_marker[loc_col] >= jj_begin_row) { ihat[loc_col] = cnt_c; ipnt[cnt_c] = loc_col; ahat[cnt_c++] -= A_ext_data[kk]*distribute; } else { ihat[loc_col] = cnt_f; ipnt[cnt_f] = loc_col; ahat[cnt_f++] -= A_ext_data[kk]*distribute; } } else { loc_col = -k1 - 1; if(num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1]) { indx = ihat_offd[loc_col]; if (indx > -1) ahat_offd[indx] -= A_ext_data[kk]*distribute; else if(P_marker_offd[loc_col] >= jj_begin_row_offd) { ihat_offd[loc_col] = cnt_c_offd; ipnt_offd[cnt_c_offd] = loc_col; ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute; } else { ihat_offd[loc_col] = cnt_f_offd; ipnt_offd[cnt_f_offd] = loc_col; ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_2 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); diagonal = ahat[cnt_c]; ahat[cnt_c] = 0; sum_pos = 0; sum_pos_C = 0; sum_neg = 0; sum_neg_C = 0; sum = 0; sum_C = 0; if(sep_weight == 1) { for (jj=0; jj < cnt_c; jj++) { if (ahat[jj] > 0) { sum_pos_C += ahat[jj]; } else { sum_neg_C += ahat[jj]; } } if(num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos_C += ahat_offd[jj]; } else { sum_neg_C += ahat_offd[jj]; } } } sum_pos = sum_pos_C; sum_neg = sum_neg_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { if (ahat[jj] > 0) { sum_pos += ahat[jj]; } 
else { sum_neg += ahat[jj]; } ahat[jj] = 0; } if(num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos += ahat_offd[jj]; } else { sum_neg += ahat_offd[jj]; } ahat_offd[jj] = 0; } } if (sum_neg_C*diagonal) alfa = sum_neg/sum_neg_C/diagonal; if (sum_pos_C*diagonal) beta = sum_pos/sum_pos_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; if (ahat[j1] > 0) P_diag_data[jj] = -beta*ahat[j1]; else P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if(num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; if (ahat_offd[j1] > 0) P_offd_data[jj] = -beta*ahat_offd[j1]; else P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } else { for (jj=0; jj < cnt_c; jj++) { sum_C += ahat[jj]; } if(num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { sum_C += ahat_offd[jj]; } } sum = sum_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { sum += ahat[jj]; ahat[jj] = 0; } if(num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { sum += ahat_offd[jj]; ahat_offd[jj] = 0; } } if (sum_C*diagonal) alfa = sum/sum_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if(num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_3 += wall_time; fflush(NULL); } } } if (debug_flag==4) { hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n", my_id, wall_1, wall_2, wall_3); fflush(NULL); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/ num_cols_P_offd = 0; if(P_offd_size) { hypre_TFree(P_marker); if (full_off_procNodes) P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); for (i=0; i < full_off_procNodes; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { if(tmp_CF_marker_offd[index] >= 0) { num_cols_P_offd++; P_marker[index] = 1; } } } if (num_cols_P_offd) col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while( P_marker[index] == 0) index++; col_map_offd_P[i] = index++; } for(i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) index++; col_map_offd_P[i] = fine_to_coarse_offd[index]; index++; } /* Sort the col_map_offd_P and P_offd_j correctly */ for(i = 0; i < num_cols_P_offd; i++) P_marker[i] = col_map_offd_P[i]; /* Check if sort actually changed anything */ if(hypre_ssort(col_map_offd_P,num_cols_P_offd)) { for(i = 0; i < P_offd_size; i++) for(j = 0; j < num_cols_P_offd; j++) if(P_marker[P_offd_j[i]] == col_map_offd_P[j]) { P_offd_j[i] = j; j = num_cols_P_offd; } } hypre_TFree(P_marker); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse); hypre_TFree(P_marker); hypre_TFree(ahat); hypre_TFree(ihat); hypre_TFree(ipnt); if (full_off_procNodes) { hypre_TFree(ahat_offd); hypre_TFree(ihat_offd); hypre_TFree(ipnt_offd); } if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd); hypre_TFree(P_marker_offd); hypre_TFree(CF_marker_offd); hypre_TFree(tmp_CF_marker_offd); if(num_functions > 1) hypre_TFree(dof_func_offd); hypre_TFree(found); 
hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; }

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPIInterp
 *
 * Comment:
 *   Builds the AMG interpolation matrix P from the C/F splitting in
 *   CF_marker and the strength matrix S.  An F-point i interpolates from
 *   its strong C-neighbors and, additionally, from the strong C-neighbors
 *   of its strong F-neighbors (distance-two / extended interpolation set;
 *   presumably the "ext+i" scheme of the function name — confirm against
 *   hypre documentation).  Off-processor neighbors-of-neighbors are pulled
 *   in via new_offd_nodes()/alt_insert_new_nodes() and an extended comm
 *   package.  The structure pass and the weight pass are both threaded
 *   with OpenMP; per-thread partial row counts are stitched together with
 *   prefix sums between barriers.
 *
 *   Arguments:
 *     A                - fine-grid ParCSR matrix
 *     CF_marker        - per-row splitting: >= 0 means C-point, -3 means
 *                        "skip" (mapped back to -1 on exit), anything
 *                        else is treated as an F-point
 *     S                - strength-of-connection matrix (same row space as A)
 *     num_cpts_global  - coarse-point partitioning info (first cpt /
 *                        total number of cpts, layout depends on
 *                        HYPRE_NO_GLOBAL_PARTITION)
 *     num_functions,
 *     dof_func         - multi-variable data: weak connections are only
 *                        collapsed into the diagonal when the functions
 *                        of i and its neighbor agree
 *     debug_flag       - == 4 enables wall-clock timing printouts
 *     trunc_factor,
 *     max_elmts        - passed to hypre_BoomerAMGInterpTruncation()
 *     col_offd_S_to_A  - optional map from S's offd columns to A's offd
 *                        columns (may be NULL)
 *     P_ptr            - output: the interpolation matrix
 *
 *   Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
                                HYPRE_Int num_functions, HYPRE_Int *dof_func,
                                HYPRE_Int debug_flag,
                                double trunc_factor, HYPRE_Int max_elmts,
                                HYPRE_Int *col_offd_S_to_A,
                                hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double          *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   double          *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int       *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this rank's global column (= row) range; used below
    * to decide whether a global index from A_ext/Sop is local. */
   HYPRE_Int        col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        col_n = col_1 + local_numrows;
   HYPRE_Int        total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix    *P_diag;
   hypre_CSRMatrix    *P_offd;

   double          *P_diag_data = NULL;
   HYPRE_Int       *P_diag_i, *P_diag_j = NULL;
   double          *P_offd_data = NULL;
   HYPRE_Int       *P_offd_i, *P_offd_j = NULL;

   HYPRE_Int       *col_map_offd_P = NULL;
   HYPRE_Int        P_diag_size;
   HYPRE_Int        P_offd_size;
   HYPRE_Int       *P_marker = NULL;       /* per-thread scratch in the parallel region */
   HYPRE_Int       *P_marker_offd = NULL;  /* per-thread scratch in the parallel region */
   HYPRE_Int       *CF_marker_offd = NULL;
   HYPRE_Int       *tmp_CF_marker_offd = NULL;
   HYPRE_Int       *dof_func_offd = NULL;

   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   double          *A_ext_data;
   HYPRE_Int       *A_ext_i;
   HYPRE_Int       *A_ext_j;

   HYPRE_Int       *fine_to_coarse = NULL;
   HYPRE_Int       *fine_to_coarse_offd = NULL;
   HYPRE_Int       *found = NULL;

   HYPRE_Int        num_cols_P_offd;
   HYPRE_Int        newoff, loc_col;
   HYPRE_Int        A_ext_rows, full_off_procNodes;

   hypre_CSRMatrix *Sop;
   HYPRE_Int       *Sop_i;
   HYPRE_Int       *Sop_j;
   HYPRE_Int        Soprows, sgn = 1;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int        jj_begin_row, jj_end_row;
   HYPRE_Int        jj_begin_row_offd = 0;
   HYPRE_Int        jj_end_row_offd = 0;
   HYPRE_Int        coarse_counter;

   /* Interpolation weight variables */
   double           sum, diagonal, distribute;
   HYPRE_Int        strong_f_marker;

   /* Loop variables */
   HYPRE_Int        index;
   HYPRE_Int        start_indexing = 0;
   HYPRE_Int        i, i1, i2, j, jj, kk, k1, jj1;

   /* Threading variables */
   HYPRE_Int        my_thread_num, num_threads, start, stop;
   HYPRE_Int       *max_num_threads = hypre_CTAlloc(HYPRE_Int, 1);
   HYPRE_Int       *diag_offset;            /* per-thread P_diag nnz counts, then prefix sums */
   HYPRE_Int       *fine_to_coarse_offset;  /* per-thread coarse-point counts, then prefix sums */
   HYPRE_Int       *offd_offset;            /* per-thread P_offd nnz counts, then prefix sums */

   /* Definitions */
   double           zero = 0.0;
   double           one = 1.0;
   double           wall_time;
   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   newoff = 0;
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      /*----------------------------------------------------------------------
       * Get the off processors rows for A and S, associated with columns in
       * A_offd and S_offd.
       *---------------------------------------------------------------------*/
      A_ext      = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      A_ext_rows = hypre_CSRMatrixNumRows(A_ext);

      Sop     = hypre_ParCSRMatrixExtractBExt(S,A,0);
      Sop_i   = hypre_CSRMatrixI(Sop);
      Sop_j   = hypre_CSRMatrixJ(Sop);
      Soprows = hypre_CSRMatrixNumRows(Sop);

      /* Find nodes that are neighbors of neighbors, not found in offd */
      newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j, Soprows,
                              col_map_offd, col_1, col_n, Sop_i, Sop_j,
                              CF_marker, comm_pkg);
      if(newoff >= 0)
         full_off_procNodes = newoff + num_cols_A_offd;
      else
         return hypre_error_flag;

      /* Possibly add new points and new processors to the comm_pkg, all
       * processors need new_comm_pkg */

      /* AHB - create a new comm package just for extended info -
         this will work better with the assumed partition*/
      hypre_ParCSRFindExtendCommPkg(A, newoff, found, &extend_comm_pkg);

      if (full_off_procNodes)
         CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      if (num_functions > 1 && full_off_procNodes > 0)
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker,
                           full_off_procNodes, CF_marker_offd);

      if(num_functions > 1)
         alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func,
                              full_off_procNodes, dof_func_offd);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
   }

   if (full_off_procNodes)
   {
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      tmp_CF_marker_offd  = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
   }

   /* This function is smart enough to check P_marker and P_marker_offd only,
    * and set them if they are not NULL.  The other vectors are set
    * regardless. */
   initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                   fine_to_coarse_offd, P_marker, P_marker_offd,
                   tmp_CF_marker_offd);

   /*-----------------------------------------------------------------------
    * Initialize threading variables
    *-----------------------------------------------------------------------*/
   max_num_threads[0] = hypre_NumThreads();
   diag_offset           = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
   fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
   offd_offset           = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
   for(i=0; i < max_num_threads[0]; i++)
   {
      diag_offset[i] = 0;
      fine_to_coarse_offset[i] = 0;
      offd_offset[i] = 0;
   }

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker)
#endif
   {
      /* Parallelize by computing only over each thread's range of rows.
       *
       * The first large for loop computes ~locally~ for each thread P_diag_i,
       * P_offd_i and fine_to_coarse. Then, the arrays are stitched together
       * For example the first phase would compute
       * P_diag_i = [0, 2, 4, 7, 2, 5, 6]
       * for two threads. P_diag_i[stop] points to the end of that
       * thread's data, but P_diag_i[start] points to the end of the
       * previous thread's row range. This is then stitched together at the
       * end to yield,
       * P_diag_i = [0, 2, 4, 7, 9, 14, 15].
       *
       * The second large for loop computes interpolation weights and is
       * relatively straight-forward to thread.
       */

      /* initialize thread-wise variables */
      strong_f_marker = -2;
      coarse_counter = 0;
      jj_counter = start_indexing;
      jj_counter_offd = start_indexing;
      if (n_fine)
      {
         /* each thread gets its own marker arrays (P_marker/P_marker_offd
          * are private in the omp region) */
         P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
         for (i = 0; i < n_fine; i++)
         {  P_marker[i] = -1; }
      }
      if (full_off_procNodes)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
         for (i = 0; i < full_off_procNodes; i++)
         {  P_marker_offd[i] = -1; }
      }

      /* this thread's row range */
      my_thread_num = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      start = (n_fine/num_threads)*my_thread_num;
      if (my_thread_num == num_threads-1)
      {  stop = n_fine; }
      else
      {  stop = (n_fine/num_threads)*(my_thread_num+1); }

      /* loop over rows */
      for (i = start; i < stop; i++)
      {
         P_diag_i[i] = jj_counter;
         if (num_procs > 1)
            P_offd_i[i] = jj_counter_offd;

         if (CF_marker[i] >= 0)
         {
            jj_counter++;
            fine_to_coarse[i] = coarse_counter;
            coarse_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i, or C-points that strongly influence F-points
          * that strongly influence i.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] != -3)
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               { /* i1 is a C point */
                  if (P_marker[i1] < P_diag_i[i])
                  {
                     P_marker[i1] = jj_counter;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               { /* i1 is a F point, loop through it's strong neighbors */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if(P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if(P_marker_offd[k1] < P_offd_i[i])
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            /* Look at off diag strong connections of i */
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if(col_offd_S_to_A)
                     i1 = col_offd_S_to_A[i1];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     if(P_marker_offd[i1] < P_offd_i[i])
                     {
                        tmp_CF_marker_offd[i1] = 1;
                        P_marker_offd[i1] = jj_counter_offd;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  { /* F point; look at neighbors of i1. Sop contains global col
                     * numbers and entries that could be in S_diag or S_offd or
                     * neither. */
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        k1 = Sop_j[kk];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* In S_diag */
                           loc_col = k1-col_1;
                           if(CF_marker[loc_col] >= 0)
                           {
                              if(P_marker[loc_col] < P_diag_i[i])
                              {
                                 P_marker[loc_col] = jj_counter;
                                 jj_counter++;
                              }
                           }
                        }
                        else
                        {
                           /* negative Sop_j entries encode off-processor
                            * columns; -k1-1 recovers the local offd index
                            * (NOTE(review): encoding set up by
                            * new_offd_nodes — confirm there) */
                           loc_col = -k1 - 1;
                           if(CF_marker_offd[loc_col] >= 0)
                           {
                              if(P_marker_offd[loc_col] < P_offd_i[i])
                              {
                                 P_marker_offd[loc_col] = jj_counter_offd;
                                 tmp_CF_marker_offd[loc_col] = 1;
                                 jj_counter_offd++;
                              }
                           }
                        }
                     }
                  }
               }
            }
         }
      }
      /*-----------------------------------------------------------------------
       * End loop over fine grid.
       *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* this thread's local counts; the row-pointer entry one past this
       * thread's range holds the thread-local total until stitched below */
      P_diag_i[stop] = jj_counter;
      P_offd_i[stop] = jj_counter_offd;
      fine_to_coarse_offset[my_thread_num] = coarse_counter;
      diag_offset[my_thread_num] = jj_counter;
      offd_offset[my_thread_num] = jj_counter_offd;

      /* Stitch P_diag_i, P_offd_i and fine_to_coarse together */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if(my_thread_num == 0)
      {
         /* Calculate the offset for P_diag_i and P_offd_i for each thread
          * (serial inclusive prefix sum over the per-thread counts) */
         for (i = 1; i < num_threads; i++)
         {
            diag_offset[i] = diag_offset[i-1] + diag_offset[i];
            fine_to_coarse_offset[i] = fine_to_coarse_offset[i-1] + fine_to_coarse_offset[i];
            offd_offset[i] = offd_offset[i-1] + offd_offset[i];
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if(my_thread_num > 0)
      {
         /* update row pointer array with offset,
          * making sure to update the row stop index */
         for (i = start+1; i <= stop; i++)
         {
            P_diag_i[i] += diag_offset[my_thread_num-1];
            P_offd_i[i] += offd_offset[my_thread_num-1];
         }
         /* update fine_to_coarse by offsetting with the offset
          * from the preceding thread */
         for (i = start; i < stop; i++)
         {
            if(fine_to_coarse[i] >= 0)
            {
               fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num-1];
            }
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if(my_thread_num == 0)
      {
         if (debug_flag==4)
         {
            wall_time = time_getWallclockSeconds() - wall_time;
            hypre_printf("Proc = %d     determine structure    %f\n",
                         my_id, wall_time);
            fflush(NULL);
         }
         /*-----------------------------------------------------------------------
          * Allocate  arrays.
          *-----------------------------------------------------------------------*/
         if (debug_flag== 4) wall_time = time_getWallclockSeconds();

         P_diag_size = P_diag_i[n_fine];
         P_offd_size = P_offd_i[n_fine];

         if (P_diag_size)
         {
            P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size);
            P_diag_data = hypre_CTAlloc(double, P_diag_size);
         }

         if (P_offd_size)
         {
            P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size);
            P_offd_data = hypre_CTAlloc(double, P_offd_size);
         }
      }

      /* Fine to coarse mapping */
      if(num_procs > 1 && my_thread_num == 0)
      {
         /* shift to global coarse numbering for the exchange, then back */
         for (i = 0; i < n_fine; i++)
            fine_to_coarse[i] += my_first_cpt;

         alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                              full_off_procNodes, fine_to_coarse_offd);

         for (i = 0; i < n_fine; i++)
            fine_to_coarse[i] -= my_first_cpt;
      }

      /* reset this thread's private markers for the weight pass */
      for (i = 0; i < n_fine; i++)
         P_marker[i] = -1;

      for (i = 0; i < full_off_procNodes; i++)
         P_marker_offd[i] = -1;

      /*-----------------------------------------------------------------------
       * Loop over fine grid points.
       *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      for (i = start; i < stop; i++)
      {
         jj_begin_row = P_diag_i[i];
         jj_begin_row_offd = P_offd_i[i];
         jj_counter = jj_begin_row;
         jj_counter_offd = jj_begin_row_offd;

         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] != -3)
         {
            strong_f_marker--;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  if (P_marker[i1] < jj_begin_row)
                  {
                     P_marker[i1] = jj_counter;
                     P_diag_j[jj_counter]    = fine_to_coarse[i1];
                     P_diag_data[jj_counter] = zero;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if(P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if(CF_marker_offd[k1] >= 0)
                        {
                           if(P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }

            if ( num_procs > 1)
            {
               for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if(col_offd_S_to_A)
                     i1 = col_offd_S_to_A[i1];
                  if ( CF_marker_offd[i1] >= 0)
                  {
                     if(P_marker_offd[i1] < jj_begin_row_offd)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     P_marker_offd[i1] = strong_f_marker;
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        k1 = Sop_j[kk];
                        /* Find local col number */
                        if(k1 >= col_1 && k1 < col_n)
                        {
                           loc_col = k1-col_1;
                           if(CF_marker[loc_col] >= 0)
                           {
                              if(P_marker[loc_col] < jj_begin_row)
                              {
                                 P_marker[loc_col] = jj_counter;
                                 P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                                 P_diag_data[jj_counter] = zero;
                                 jj_counter++;
                              }
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(CF_marker_offd[loc_col] >= 0)
                           {
                              if(P_marker_offd[loc_col] < jj_begin_row_offd)
                              {
                                 P_marker_offd[loc_col] = jj_counter_offd;
                                 P_offd_j[jj_counter_offd]=loc_col;
                                 P_offd_data[jj_counter_offd] = zero;
                                 jj_counter_offd++;
                              }
                           }
                        }
                     }
                  }
               }
            }

            jj_end_row = jj_counter;
            jj_end_row_offd = jj_counter_offd;

            /* first entry of each A_diag row is the diagonal coefficient */
            diagonal = A_diag_data[A_diag_i[i]];

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            { /* i1 is a c-point and strongly influences i, accumulate
               * a_(i,i1) into interpolation weight */
               i1 = A_diag_j[jj];
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               else if(P_marker[i1] == strong_f_marker)
               {
                  sum = zero;
                  sgn = 1;
                  if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i. */
                  for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if((P_marker[i2] >= jj_begin_row || i2 == i) &&
                        (sgn*A_diag_data[jj1]) < 0)
                        sum += A_diag_data[jj1];
                  }
                  if(num_procs > 1)
                  {
                     for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if(P_marker_offd[i2] >= jj_begin_row_offd &&
                           (sgn*A_offd_data[jj1]) < 0)
                           sum += A_offd_data[jj1];
                     }
                  }
                  if(sum != 0)
                  {
                     distribute = A_diag_data[jj]/sum;
                     /* Loop over row of A for point i1 and do
                      * the distribution */
                     for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if(P_marker[i2] >= jj_begin_row &&
                           (sgn*A_diag_data[jj1]) < 0)
                           P_diag_data[P_marker[i2]] +=
                              distribute*A_diag_data[jj1];
                        if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
                           diagonal += distribute*A_diag_data[jj1];
                     }
                     if(num_procs > 1)
                     {
                        for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if(P_marker_offd[i2] >= jj_begin_row_offd &&
                              (sgn*A_offd_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[i2]] +=
                                 distribute*A_offd_data[jj1];
                        }
                     }
                  }
                  else
                  {
                     /* nothing to distribute to: lump a_(i,i1) into the
                      * diagonal instead */
                     diagonal += A_diag_data[jj];
                  }
               }
               /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
                * diagonal */
               else if (CF_marker[i1] != -3)
               {
                  if(num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }
            if(num_procs > 1)
            {
               for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if(P_marker_offd[i1] >= jj_begin_row_offd)
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  else if(P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;
                     sgn = 1;
                     if(A_ext_data[A_ext_i[i1]] < 0) sgn = -1;
                     for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        k1 = A_ext_j[jj1];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* diag */
                           loc_col = k1 - col_1;
                           if((P_marker[loc_col] >= jj_begin_row || loc_col == i) &&
                              (sgn*A_ext_data[jj1]) < 0)
                              sum += A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
                              (sgn*A_ext_data[jj1]) < 0)
                              sum += A_ext_data[jj1];
                        }
                     }
                     if(sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;
                        for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
                        {
                           k1 = A_ext_j[jj1];
                           if(k1 >= col_1 && k1 < col_n)
                           { /* diag */
                              loc_col = k1 - col_1;
                              if(P_marker[loc_col] >= jj_begin_row &&
                                 (sgn*A_ext_data[jj1]) < 0)
                                 P_diag_data[P_marker[loc_col]] += distribute*
                                    A_ext_data[jj1];
                              if(loc_col == i && (sgn*A_ext_data[jj1]) < 0)
                                 diagonal += distribute*A_ext_data[jj1];
                           }
                           else
                           {
                              loc_col = -k1 - 1;
                              if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
                                 (sgn*A_ext_data[jj1]) < 0)
                                 P_offd_data[P_marker_offd[loc_col]] += distribute*
                                    A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }
            }
            if (diagonal)
            {
               /* classical RS scaling: w_ij = -(accumulated a_ij)/a_ii */
               for(jj = jj_begin_row; jj < jj_end_row; jj++)
                  P_diag_data[jj] /= -diagonal;
               for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
                  P_offd_data[jj] /= -diagonal;
            }
         }
         strong_f_marker--;
      }
      /*-----------------------------------------------------------------------
       * End large for loop over nfine
       *-----------------------------------------------------------------------*/

      /* free this thread's private scratch arrays */
      if (n_fine)
      {
         hypre_TFree(P_marker);
      }
      if (full_off_procNodes)
      {
         hypre_TFree(P_marker_offd);
      }
   }
   /*-----------------------------------------------------------------------
    * End PAR_REGION
    *-----------------------------------------------------------------------*/

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Allocate  arrays.
    *-----------------------------------------------------------------------*/

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* row starts belong to A; P must not free them */
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation may reallocate the CSR arrays; refresh local aliases */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   num_cols_P_offd = 0;
   if(P_offd_size)
   {
      /* shared P_marker is NULL here (the per-thread copies were private);
       * reuse the name as scratch over the off-proc node range */
      hypre_TFree(P_marker);
      if (full_off_procNodes)
         P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < full_off_procNodes; i++)
         P_marker[i] = 0;

      /* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if
       * tmp_CF_marker_offd has i marked. num_cols_P_offd is then set to the
       * total number of times P_marker is set */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if(tmp_CF_marker_offd[index] >= 0)
         {
            P_marker[index] = 1;
         }
      }
      num_cols_P_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:num_cols_P_offd) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < full_off_procNodes; i++)
      {
         if(P_marker[i])
         {
            num_cols_P_offd++;
         }
      }

      if (num_cols_P_offd)
         col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);

      /* col_map_offd_P[i] = index of i-th nonzero in P_marker
       * JBS: Not worth parallelizing this for loop with OMP */
      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while( P_marker[index] == 0) index++;
         col_map_offd_P[i] = index++;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
                                          P_offd_j[i],
                                          num_cols_P_offd);

      /* col_map_offd_P[i] = fine_to_coarse[ index of i-th nonzero in P_marker ]
       * JBS: Not worth parallelizing this for loop with OMP */
      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         col_map_offd_P[i] = fine_to_coarse_offd[index];
         index++;
      }

      /* Sort the col_map_offd_P and P_offd_j correctly */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < num_cols_P_offd; i++)
         P_marker[i] = col_map_offd_P[i];

      /* Check if sort actually changed anything */
      if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j) HYPRE_SMP_SCHEDULE
#endif
         for(i = 0; i < P_offd_size; i++)
            for(j = 0; j < num_cols_P_offd; j++)
               if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
               {
                  P_offd_j[i] = j;
                  j = num_cols_P_offd;  /* found — exit inner loop */
               }
      }
      hypre_TFree(P_marker);
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_MatvecCommPkgCreate(P);

   /* restore the -3 "skip" markers to ordinary F-points (-1) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(max_num_threads);
   hypre_TFree(fine_to_coarse);
   hypre_TFree(diag_offset);
   hypre_TFree(offd_offset);
   hypre_TFree(fine_to_coarse_offset);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      if(num_functions > 1)
         hypre_TFree(dof_func_offd);
      hypre_TFree(found);
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPICCInterp
 * Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, double trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); double *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); double *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; double *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; double *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; HYPRE_Int *col_map_offd_P = NULL; HYPRE_Int 
P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int **ext_p, **ext_p_offd;*/ HYPRE_Int ccounter_offd; /*HYPRE_Int *clist_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; double *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *found = NULL; HYPRE_Int num_cols_P_offd; HYPRE_Int newoff, loc_col; HYPRE_Int A_ext_rows, full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; HYPRE_Int Soprows, sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter, coarse_counter_offd; /* Interpolation weight variables */ double sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ HYPRE_Int index; HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, j, jj, kk, k1, jj1; HYPRE_Int ccounter; /*HYPRE_Int *clist, ccounter;*/ /* Definitions */ double zero = 0.0; double one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ newoff = 0; full_off_procNodes = 0; if (num_procs > 1) { 
/*---------------------------------------------------------------------- * Get the off processors rows for A and S, associated with columns in * A_offd and S_offd. *---------------------------------------------------------------------*/ A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); A_ext_rows = hypre_CSRMatrixNumRows(A_ext); Sop = hypre_ParCSRMatrixExtractBExt(S,A,0); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); Soprows = hypre_CSRMatrixNumRows(Sop); /* Find nodes that are neighbors of neighbors, not found in offd */ newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j, Soprows, col_map_offd, col_1, col_n, Sop_i, Sop_j, CF_marker, comm_pkg); if(newoff >= 0) full_off_procNodes = newoff + num_cols_A_offd; else return hypre_error_flag; /* Possibly add new points and new processors to the comm_pkg, all * processors need new_comm_pkg */ /* AHB - create a new comm package just for extended info - this will work better with the assumed partition*/ hypre_ParCSRFindExtendCommPkg(A, newoff, found, &extend_comm_pkg); if (full_off_procNodes) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); if (num_functions > 1 && full_off_procNodes > 0) dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker, full_off_procNodes, CF_marker_offd); if(num_functions > 1) alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func, full_off_procNodes, dof_func_offd); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } /*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS); for(i = 0; i < MAX_C_CONNECTIONS; i++) clist[i] = 0; if(num_procs > 1) { clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS); for(i = 0; i < MAX_C_CONNECTIONS; i++) clist_offd[i] = 0; }*/ initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; coarse_counter_offd = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /* Initialize ccounter for each f point */ ccounter = 0; ccounter_offd = 0; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { /*clist[ccounter++] = i1;*/ P_marker[i1] = jj_counter; jj_counter++; } } } /*qsort0(clist,0,ccounter-1);*/ if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < P_offd_i[i]) { /*clist_offd[ccounter_offd++] = i1;*/ tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } /*qsort0(clist_offd,0,ccounter_offd-1);*/ } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { /*if(hypre_BinarySearch(clist,k1,ccounter) >= 0) {*/ common_c = 1; break; /*kk = S_diag_i[i1+1]; }*/ } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ /*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = S_offd_i[i1+1]; }*/ } } } if(!common_c) { /* No common c point, extend the interp set */ for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; 
if(CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; /*break;*/ } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; /*break;*/ } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] == -1) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { /*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { /*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] > 0) { if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; /*break;*/ } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] > 0) { if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; /*break;*/ } } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if(num_procs > 1) { for (jj = 
S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(double, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(double, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; ccounter = start_indexing; ccounter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if(num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. 
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { ccounter = 0; ccounter_offd = 0; strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; /*clist[ccounter++] = i1;*/ } } } /*qsort0(clist,0,ccounter-1);*/ if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*clist_offd[ccounter_offd++] = i1;*/ } } } /*qsort0(clist_offd,0,ccounter_offd-1);*/ } for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if(CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { /*if(hypre_BinarySearch(clist,k1,ccounter) >= 0) {*/ common_c = 1; break; /*kk = S_diag_i[i1+1]; }*/ } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < 
S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ /*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = S_offd_i[i1+1]; }*/ } } } if(!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; /*break;*/ } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*break;*/ } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if(CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { /*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { /*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; /* Find local col number */ if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(CF_marker[loc_col] > 0) { if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; 
P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; /*break;*/ } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] > 0) { if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*break;*/ } } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; 
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; if(i2 == i && (sgn*A_diag_data[jj1]) < 0) diagonal += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_ext_data[A_ext_i[i1]] < 0) sgn = -1; for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if((P_marker[loc_col] >= jj_begin_row || loc_col == i) && (sgn*A_ext_data[jj1]) < 0) sum += A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; if(loc_col == i && (sgn*A_ext_data[jj1]) < 0) diagonal += distribute*A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != 
-3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/ num_cols_P_offd = 0; if(P_offd_size) { hypre_TFree(P_marker); if (full_off_procNodes) P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); for (i=0; i < full_off_procNodes; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { if(tmp_CF_marker_offd[index] >= 0) { num_cols_P_offd++; P_marker[index] = 1; } } } if (num_cols_P_offd) col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while( P_marker[index] == 0) index++; col_map_offd_P[i] = index++; } for(i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) index++; col_map_offd_P[i] = fine_to_coarse_offd[index]; index++; } /* Sort the col_map_offd_P and P_offd_j correctly */ for(i = 0; i < num_cols_P_offd; i++) P_marker[i] = col_map_offd_P[i]; /* Check if sort actually changed anything */ if(hypre_ssort(col_map_offd_P,num_cols_P_offd)) { for(i = 0; i < P_offd_size; i++) for(j = 0; j < num_cols_P_offd; j++) if(P_marker[P_offd_j[i]] == col_map_offd_P[j]) { P_offd_j[i] = j; j = num_cols_P_offd; } } } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse); hypre_TFree(P_marker); /*hypre_TFree(clist);*/ if (num_procs > 1) { /*hypre_TFree(clist_offd);*/ hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd); hypre_TFree(P_marker_offd); hypre_TFree(CF_marker_offd); hypre_TFree(tmp_CF_marker_offd); if(num_functions > 1) hypre_TFree(dof_func_offd); hypre_TFree(found); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } 
/*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildFFInterp * Comment: Only use FF when there is no common c point. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, double trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); double *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); double *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; double *P_diag_data = NULL; 
HYPRE_Int *P_diag_i, *P_diag_j = NULL; double *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; HYPRE_Int *col_map_offd_P = NULL; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; HYPRE_Int ccounter_offd; HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; double *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *found = NULL; HYPRE_Int num_cols_P_offd; HYPRE_Int newoff, loc_col; HYPRE_Int A_ext_rows, full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; HYPRE_Int Soprows; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter, coarse_counter_offd; /* Interpolation weight variables */ double sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; HYPRE_Int sgn = 1; /* Loop variables */ HYPRE_Int index; HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, j, jj, kk, k1, jj1; HYPRE_Int ccounter; /*HYPRE_Int *clist, ccounter;*/ /* Definitions */ double zero = 0.0; double one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * 
neighbors */ newoff = 0; full_off_procNodes = 0; if (num_procs > 1) { /*---------------------------------------------------------------------- * Get the off processors rows for A and S, associated with columns in * A_offd and S_offd. *---------------------------------------------------------------------*/ A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); A_ext_rows = hypre_CSRMatrixNumRows(A_ext); Sop = hypre_ParCSRMatrixExtractBExt(S,A,0); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); Soprows = hypre_CSRMatrixNumRows(Sop); /* Find nodes that are neighbors of neighbors, not found in offd */ newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j, Soprows, col_map_offd, col_1, col_n, Sop_i, Sop_j, CF_marker, comm_pkg); if(newoff >= 0) full_off_procNodes = newoff + num_cols_A_offd; else return hypre_error_flag; /* Possibly add new points and new processors to the comm_pkg, all * processors need new_comm_pkg */ /* AHB - create a new comm package just for extended info - this will work better with the assumed partition*/ hypre_ParCSRFindExtendCommPkg(A, newoff, found, &extend_comm_pkg); if (full_off_procNodes) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); if (num_functions > 1 && full_off_procNodes > 0) dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker, full_off_procNodes, CF_marker_offd); if(num_functions > 1) alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func, full_off_procNodes, dof_func_offd); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; coarse_counter_offd = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else { /* Initialize ccounter for each f point */ ccounter = 0; ccounter_offd = 0; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] < 0) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { common_c = 1; break; } } } if(!common_c) { /* No common c point, extend the interp set */ for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if(CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; 
jj_counter_offd++; } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] < 0) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] > 0) { if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] > 0) { if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(double, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(double, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; ccounter = start_indexing; ccounter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ jj_begin_row_offd = 0; for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if(num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { ccounter = 0; ccounter_offd = 0; strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if(CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { common_c = 1; break; } } } if(!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; 
kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if(CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; /* Find local col number */ if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(CF_marker[loc_col] > 0) { if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] > 0) { if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == 
dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_ext_data[A_ext_i[i1]] < 0) sgn = -1; for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row && (sgn*A_ext_data[jj1])<0) sum += A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; 
hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ num_cols_P_offd = 0; if(P_offd_size) { hypre_TFree(P_marker); if (full_off_procNodes) P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); for (i=0; i < full_off_procNodes; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { if(tmp_CF_marker_offd[index] >= 0) { num_cols_P_offd++; P_marker[index] = 1; } } } if (num_cols_P_offd) col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while( P_marker[index] == 0) index++; col_map_offd_P[i] = index++; } for(i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) index++; col_map_offd_P[i] = fine_to_coarse_offd[index]; index++; } /* Sort the col_map_offd_P and P_offd_j correctly */ for(i = 0; i < num_cols_P_offd; i++) P_marker[i] = col_map_offd_P[i]; /* Check if sort actually changed anything */ if(hypre_ssort(col_map_offd_P,num_cols_P_offd)) { for(i = 0; i < P_offd_size; i++) for(j = 0; j < num_cols_P_offd; j++) if(P_marker[P_offd_j[i]] == col_map_offd_P[j]) { P_offd_j[i] = j; j = num_cols_P_offd; } } } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; 
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_MatvecCommPkgCreate(P);

   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse);
   hypre_TFree(P_marker);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(P_marker_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      if(num_functions > 1)
         hypre_TFree(dof_func_offd);
      hypre_TFree(found);

      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildFF1Interp
 *  Comment: Only use FF when there is no common c point.
 *
 *  Builds a distance-two ("FF1") interpolation operator P for BoomerAMG.
 *  An F-point i interpolates from its strong C-neighbors; a strong F-F
 *  connection i--i1 is only extended through (adding one of i1's C-points
 *  to i's interpolation set) when i and i1 share NO common C-point.
 *  Unlike the full FF variant, at most ONE C-point of i1 is added (note
 *  the `break` after each successful extension in both passes).
 *
 *  Inputs:
 *    A                - fine-grid operator (ParCSR).
 *    CF_marker        - C/F splitting per fine row: >= 0 C-point, -1 F-point,
 *                       -3 special F-point excluded from interpolation
 *                       (reset to -1 before returning).
 *    S                - strength-of-connection matrix (same row space as A).
 *    num_cpts_global  - global C-point partitioning info.
 *    num_functions, dof_func - system/unknown-based interpolation support.
 *    trunc_factor, max_elmts - post-build truncation controls.
 *    col_offd_S_to_A  - optional map from S's offd columns to A's offd columns.
 *  Output:
 *    *P_ptr           - the assembled ParCSR interpolation matrix.
 *  Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func,
                              HYPRE_Int debug_flag, double trunc_factor,
                              HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A,
                              hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm                 comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg     *comm_pkg = hypre_ParCSRMatrixCommPkg(A);

   HYPRE_Int              my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double          *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   double          *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int       *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        col_n = col_1 + local_numrows;   /* [col_1, col_n) = this rank's global columns */
   HYPRE_Int        total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix    *P_diag;
   hypre_CSRMatrix    *P_offd;

   double          *P_diag_data = NULL;
   HYPRE_Int       *P_diag_i, *P_diag_j = NULL;
   double          *P_offd_data = NULL;
   HYPRE_Int       *P_offd_i, *P_offd_j = NULL;

   HYPRE_Int       *col_map_offd_P = NULL;
   HYPRE_Int        P_diag_size;
   HYPRE_Int        P_offd_size;
   HYPRE_Int       *P_marker = NULL;          /* per-row membership marker for P_diag columns */
   HYPRE_Int       *P_marker_offd = NULL;     /* same, for P_offd columns */
   HYPRE_Int       *CF_marker_offd = NULL;    /* CF splitting of off-processor points */
   HYPRE_Int       *tmp_CF_marker_offd = NULL; /* flags offd points actually used by P */
   HYPRE_Int       *dof_func_offd = NULL;
   HYPRE_Int        ccounter_offd;
   HYPRE_Int        common_c;                 /* 1 when i and F-neighbor i1 share a C-point */

   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   double          *A_ext_data;
   HYPRE_Int       *A_ext_i;
   HYPRE_Int       *A_ext_j;

   HYPRE_Int       *fine_to_coarse = NULL;
   HYPRE_Int       *fine_to_coarse_offd = NULL;
   HYPRE_Int       *found = NULL;

   HYPRE_Int        num_cols_P_offd;
   HYPRE_Int        newoff, loc_col;
   HYPRE_Int        A_ext_rows, full_off_procNodes;

   hypre_CSRMatrix *Sop;
   HYPRE_Int       *Sop_i;
   HYPRE_Int       *Sop_j;

   HYPRE_Int        Soprows;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int        jj_begin_row, jj_end_row;
   HYPRE_Int        jj_begin_row_offd = 0;
   HYPRE_Int        jj_end_row_offd = 0;
   HYPRE_Int        coarse_counter, coarse_counter_offd;

   /* Interpolation weight variables */
   double           sum, diagonal, distribute;
   HYPRE_Int        strong_f_marker = -2;     /* sentinel marking strong F-neighbors; decremented per row */
   HYPRE_Int        sgn = 1;

   /* Loop variables */
   HYPRE_Int        index;
   HYPRE_Int        start_indexing = 0;
   HYPRE_Int        i, i1, i2, j, jj, kk, k1, jj1;
   HYPRE_Int        ccounter;
   HYPRE_Int        found_c = 0;

   /* Definitions */
   double           zero = 0.0;
   double           one  = 1.0;

   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed partition: only the first and last C-point counts are stored;
    * the last rank broadcasts the global total. */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   newoff = 0;
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      /*----------------------------------------------------------------------
       * Get the off processors rows for A and S, associated with columns in
       * A_offd and S_offd.
       *---------------------------------------------------------------------*/
      A_ext         = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i       = hypre_CSRMatrixI(A_ext);
      A_ext_j       = hypre_CSRMatrixJ(A_ext);
      A_ext_data    = hypre_CSRMatrixData(A_ext);
      A_ext_rows    = hypre_CSRMatrixNumRows(A_ext);

      Sop           = hypre_ParCSRMatrixExtractBExt(S,A,0);
      Sop_i         = hypre_CSRMatrixI(Sop);
      Sop_j         = hypre_CSRMatrixJ(Sop);
      Soprows       = hypre_CSRMatrixNumRows(Sop);

      /* Find nodes that are neighbors of neighbors, not found in offd */
      newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j, Soprows,
                              col_map_offd, col_1, col_n,
                              Sop_i, Sop_j, CF_marker, comm_pkg);
      if(newoff >= 0)
         full_off_procNodes = newoff + num_cols_A_offd;
      else
         return hypre_error_flag;

      /* Possibly add new points and new processors to the comm_pkg, all
       * processors need new_comm_pkg */

      /* AHB - create a new comm package just for extended info -
         this will work better with the assumed partition*/
      hypre_ParCSRFindExtendCommPkg(A, newoff, found,
                                    &extend_comm_pkg);

      if (full_off_procNodes)
         CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      if (num_functions > 1 && full_off_procNodes > 0)
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker,
                           full_off_procNodes, CF_marker_offd);

      if(num_functions > 1)
         alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func,
                              full_off_procNodes, dof_func_offd);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
   }

   initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                   fine_to_coarse_offd, P_marker, P_marker_offd,
                   tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;
   coarse_counter_offd = 0;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.  This pass only COUNTS entries per row (via the
    * jj counters and P_marker guards); no column/data arrays exist yet.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
         P_offd_i[i] = jj_counter_offd;

      if (CF_marker[i] >= 0)
      {
         /* C-point: interpolation is the identity, one diagonal entry. */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }

      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that stronly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else
      {
         /* Initialize ccounter for each f point */
         ccounter = 0;
         ccounter_offd = 0;
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] > 0)
            { /* i1 is a C point; mark 2 = "direct C-neighbor of i" */
               CF_marker[i1] = 2;
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
         }
         if(num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            { /* search through offd to find all c neighbors */
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if(CF_marker_offd[i1] > 0)
               { /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 2;
                  if(P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search diag to find f neighbors and determine if common c point */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] < 0)
            { /* i1 is a F point, loop through it's strong neighbors */
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if(num_procs > 1 && common_c == 0)
               { /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     { /* k1 is a c point check if it is common */
                        common_c = 1;
                        break;
                     }
                  }
               }
               if(!common_c)
               { /* No common c point, extend the interp set.
                  * FF1: add at most ONE C-neighbor of i1 (break after first). */
                  found_c = 0;
                  for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if(CF_marker[k1] > 0)
                     {
                        if(P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                           found_c = 1;
                           break;
                        }
                     }
                  }
                  if(num_procs > 1 && !found_c)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] > 0)
                        {
                           if(P_marker_offd[k1] < P_offd_i[i])
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                              break;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if (CF_marker_offd[i1] < 0)
               { /* F point; look at neighbors of i1. Sop contains global col
                  * numbers and entries that could be in S_diag or S_offd or
                  * neither.  Local columns are in [col_1,col_n); other columns
                  * are stored negated as -k-1 (hence loc_col = -k1 - 1). */
                  common_c = 0;
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  { /* Check if common c */
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1-col_1;
                        if(CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if(!common_c)
                  {
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     { /* Check if common c */
                        k1 = Sop_j[kk];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* In S_diag */
                           loc_col = k1-col_1;
                           if(CF_marker[loc_col] > 0)
                           {
                              if(P_marker[loc_col] < P_diag_i[i])
                              {
                                 P_marker[loc_col] = jj_counter;
                                 jj_counter++;
                                 break;
                              }
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(CF_marker_offd[loc_col] > 0)
                           {
                              if(P_marker_offd[loc_col] < P_offd_i[i])
                              {
                                 P_marker_offd[loc_col] = jj_counter_offd;
                                 tmp_CF_marker_offd[loc_col] = 1;
                                 jj_counter_offd++;
                                 break;
                              }
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Restore the "direct C-neighbor" marks (2) back to plain C (1). */
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
               CF_marker[i1] = 1;
         }
         if(num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            { /* search through offd to find all c neighbors */
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if(CF_marker_offd[i1] == 2)
               { /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 1;
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size);
      P_diag_data = hypre_CTAlloc(double, P_diag_size);
   }

   if (P_offd_size)
   {
      P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size);
      P_offd_data = hypre_CTAlloc(double, P_offd_size);
   }

   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   ccounter = start_indexing;
   ccounter_offd = start_indexing;

   /* Fine to coarse mapping: exchange shifted (global) indices, then undo
    * the shift locally. */
   if(num_procs > 1)
   {
      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] += my_first_cpt;

      alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                           full_off_procNodes, fine_to_coarse_offd);

      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] -= my_first_cpt;
   }

   for (i = 0; i < n_fine; i++)
      P_marker[i] = -1;

   for (i = 0; i < full_off_procNodes; i++)
      P_marker_offd[i] = -1;

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.  Second pass: repeat the first-pass row
    * structure exactly (same traversal order) while filling P_diag_j /
    * P_offd_j and computing the classical interpolation weights.
    *-----------------------------------------------------------------------*/
   jj_begin_row_offd = 0;
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      if(num_procs > 1)
         jj_begin_row_offd = jj_counter_offd;

      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter]    = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }

      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         ccounter = 0;
         ccounter_offd = 0;
         strong_f_marker--;

         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search C points only */
            i1 = S_diag_j[jj];

            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] > 0)
            {
               CF_marker[i1] = 2;
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
         }
         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if ( CF_marker_offd[i1] > 0)
               {
                  CF_marker_offd[i1] = 2;
                  if(P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search through F points */
            i1 = S_diag_j[jj];
            if(CF_marker[i1] == -1)
            {
               /* Tag strong F-neighbors with the per-row sentinel; the
                * weight loop below distributes their connections. */
               P_marker[i1] = strong_f_marker;
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if(num_procs > 1 && common_c == 0)
               { /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     { /* k1 is a c point check if it is common */
                        common_c = 1;
                        break;
                     }
                  }
               }
               if(!common_c)
               { /* No common c point, extend the interp set (one C only) */
                  found_c = 0;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if(P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                           found_c = 1;
                           break;
                        }
                     }
                  }
                  if(num_procs > 1 && !found_c)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if(CF_marker_offd[k1] >= 0)
                        {
                           if(P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                              break;
                           }
                        }
                     }
                  }
               }
            }
         }
         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if(CF_marker_offd[i1] == -1)
               { /* F points that are off proc */
                  P_marker_offd[i1] = strong_f_marker;
                  common_c = 0;
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  { /* Check if common c */
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1-col_1;
                        if(CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if(!common_c)
                  {
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        k1 = Sop_j[kk];
                        /* Find local col number */
                        if(k1 >= col_1 && k1 < col_n)
                        {
                           loc_col = k1-col_1;
                           if(CF_marker[loc_col] > 0)
                           {
                              if(P_marker[loc_col] < jj_begin_row)
                              {
                                 P_marker[loc_col] = jj_counter;
                                 P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                                 P_diag_data[jj_counter] = zero;
                                 jj_counter++;
                                 break;
                              }
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(CF_marker_offd[loc_col] > 0)
                           {
                              if(P_marker_offd[loc_col] < jj_begin_row_offd)
                              {
                                 P_marker_offd[loc_col] = jj_counter_offd;
                                 P_offd_j[jj_counter_offd]=loc_col;
                                 P_offd_data[jj_counter_offd] = zero;
                                 jj_counter_offd++;
                                 break;
                              }
                           }
                        }
                     }
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search C points only */
            i1 = S_diag_j[jj];

            /*--------------------------------------------------------------
             * Restore marks: direct C-neighbors (2) back to plain C (1).
             *--------------------------------------------------------------*/
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if ( CF_marker_offd[i1] == 2)
               {
                  CF_marker_offd[i1] = 1;
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         diagonal = A_diag_data[A_diag_i[i]];

         for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
         { /* i1 is a c-point and strongly influences i, accumulate
            * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if(P_marker[i1] == strong_f_marker)
            {
               sum = zero;
               /* NOTE(review): sgn is NOT reset to 1 here before the sign
                * test (the offd branch below does `sgn = 1;` first), so a
                * previous i1 with a negative diagonal leaves sgn == -1 for
                * an i1 with a positive diagonal — confirm against upstream
                * hypre before relying on this. */
               if(A_diag_data[A_diag_i[i1]] < 0)
                  sgn = -1;
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly incluence i. */
               for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                     sum += A_diag_data[jj1];
               }
               if(num_procs > 1)
               {
                  for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if(P_marker_offd[i2] >= jj_begin_row_offd &&
                        (sgn*A_offd_data[jj1]) < 0)
                        sum += A_offd_data[jj1];
                  }
               }
               if(sum != 0)
               {
                  distribute = A_diag_data[jj]/sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                        P_diag_data[P_marker[i2]] +=
                           distribute*A_diag_data[jj1];
                  }
                  if(num_procs > 1)
                  {
                     for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if(P_marker_offd[i2] >= jj_begin_row_offd &&
                           (sgn*A_offd_data[jj1]) < 0)
                           P_offd_data[P_marker_offd[i2]] +=
                              distribute*A_offd_data[jj1];
                     }
                  }
               }
               else
                  /* i1's strong C-connections all filtered out: lump a_(i,i1)
                   * into the diagonal instead of distributing it. */
                  diagonal += A_diag_data[jj];
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if(num_functions == 1 || dof_func[i] == dof_func[i1])
                  diagonal += A_diag_data[jj];
            }
         }
         if(num_procs > 1)
         {
            for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               i1 = A_offd_j[jj];
               if(P_marker_offd[i1] >= jj_begin_row_offd)
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               else if(P_marker_offd[i1] == strong_f_marker)
               {
                  sum = zero;
                  sgn = 1;
                  if(A_ext_data[A_ext_i[i1]] < 0)
                     sgn = -1;
                  /* A_ext rows carry the full off-proc row of i1; columns are
                   * global, with non-local ones negated as -k-1. */
                  for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
                  {
                     k1 = A_ext_j[jj1];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* diag */
                        loc_col = k1 - col_1;
                        if(P_marker[loc_col] >= jj_begin_row &&
                           (sgn*A_ext_data[jj1])<0)
                           sum += A_ext_data[jj1];
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
                           (sgn*A_ext_data[jj1]) < 0)
                           sum += A_ext_data[jj1];
                     }
                  }
                  if(sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        k1 = A_ext_j[jj1];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* diag */
                           loc_col = k1 - col_1;
                           if(P_marker[loc_col] >= jj_begin_row &&
                              (sgn*A_ext_data[jj1]) < 0)
                              P_diag_data[P_marker[loc_col]] += distribute*
                                 A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
                              (sgn*A_ext_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[loc_col]] += distribute*
                                 A_ext_data[jj1];
                        }
                     }
                  }
                  else
                     diagonal += A_offd_data[jj];
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     diagonal += A_offd_data[jj];
               }
            }
         }
         if (diagonal)
         {
            /* Classical interpolation: w_ij = -(accumulated a_ij)/diagonal. */
            for(jj = jj_begin_row; jj < jj_end_row; jj++)
               P_diag_data[jj] /= -diagonal;
            for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               P_offd_data[jj] /= -diagonal;
         }
      }
      strong_f_marker--;
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate; refresh all cached pointers/sizes. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   num_cols_P_offd = 0;
   if(P_offd_size)
   {
      /* Reuse P_marker (resized to the offd width) as a 0/1 "column used"
       * flag, then compact the used columns into col_map_offd_P. */
      hypre_TFree(P_marker);
      if (full_off_procNodes)
         P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      for (i=0; i < full_off_procNodes; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            if(tmp_CF_marker_offd[index] >= 0)
            {
               num_cols_P_offd++;
               P_marker[index] = 1;
            }
         }
      }

      if (num_cols_P_offd)
         col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);

      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while( P_marker[index] == 0) index++;
         col_map_offd_P[i] = index++;
      }
      /* Renumber P_offd_j to local indices into col_map_offd_P. */
      for(i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
                                          P_offd_j[i],
                                          num_cols_P_offd);

      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         col_map_offd_P[i] = fine_to_coarse_offd[index];
         index++;
      }

      /* Sort the col_map_offd_P and P_offd_j correctly */
      for(i = 0; i < num_cols_P_offd; i++)
         P_marker[i] = col_map_offd_P[i];

      /* Check if sort actually changed anything */
      if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
      {
         /* Sort permuted the map; remap each P_offd_j entry accordingly. */
         for(i = 0; i < P_offd_size; i++)
            for(j = 0; j < num_cols_P_offd; j++)
               if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
               {
                  P_offd_j[i] = j;
                  j = num_cols_P_offd;
               }
      }
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_MatvecCommPkgCreate(P);

   /* Restore the special F marking (-3) to ordinary F (-1). */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse);
   hypre_TFree(P_marker);
   /*hynre_TFree(clist);*/

   if (num_procs > 1)
   {
      /*hypre_TFree(clist_offd);*/
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(P_marker_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      if(num_functions > 1)
         hypre_TFree(dof_func_offd);
      hypre_TFree(found);

      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtInterp
 *  Comment:
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func,
                              HYPRE_Int debug_flag, double trunc_factor,
                              HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A,
                              hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm                 comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg     *comm_pkg = hypre_ParCSRMatrixCommPkg(A);

   HYPRE_Int              my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double          *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   double          *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int       *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows =
hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; double *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; double *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; HYPRE_Int *col_map_offd_P = NULL; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; double *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *found = NULL; HYPRE_Int num_cols_P_offd; HYPRE_Int newoff, loc_col; HYPRE_Int A_ext_rows, full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; HYPRE_Int Soprows, sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter, coarse_counter_offd; /* Interpolation weight variables */ double sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ HYPRE_Int index; HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, j, jj, kk, k1, jj1; /* Definitions */ double zero = 0.0; double one = 1.0; double wall_time; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = 
time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ newoff = 0; full_off_procNodes = 0; if (num_procs > 1) { /*---------------------------------------------------------------------- * Get the off processors rows for A and S, associated with columns in * A_offd and S_offd. *---------------------------------------------------------------------*/ A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); A_ext_rows = hypre_CSRMatrixNumRows(A_ext); Sop = hypre_ParCSRMatrixExtractBExt(S,A,0); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); Soprows = hypre_CSRMatrixNumRows(Sop); /* Find nodes that are neighbors of neighbors, not found in offd */ newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j, Soprows, col_map_offd, col_1, col_n, Sop_i, Sop_j, CF_marker, comm_pkg); if(newoff >= 0) full_off_procNodes = newoff + num_cols_A_offd; else return hypre_error_flag; /* Possibly add new points and new processors to the comm_pkg, all * processors need new_comm_pkg */ /* AHB - create a new comm package just for extended info - this will work better with the assumed partition*/ hypre_ParCSRFindExtendCommPkg(A, newoff, found, &extend_comm_pkg); if (full_off_procNodes) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); if (num_functions > 1 && full_off_procNodes > 0) dof_func_offd = hypre_CTAlloc(HYPRE_Int, 
full_off_procNodes); alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker, full_off_procNodes, CF_marker_offd); if(num_functions > 1) alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func, full_off_procNodes, dof_func_offd); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; coarse_counter_offd = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] >= 0) { if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] >= 0) { if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(double, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(double, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 
= col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; /* Find local col number */ if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(CF_marker[loc_col] >= 0) { if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] >= 0) { if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. 
*/ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if((P_marker[i2] >= jj_begin_row ) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_ext_data[A_ext_i[i1]] < 0) sgn = -1; for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if((P_marker[loc_col] >= jj_begin_row ) && (sgn*A_ext_data[jj1]) < 0) sum += A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - 
col_1; if(P_marker[loc_col] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = 
P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ num_cols_P_offd = 0; if(P_offd_size) { hypre_TFree(P_marker); if (full_off_procNodes) P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); for (i=0; i < full_off_procNodes; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { if(tmp_CF_marker_offd[index] >= 0) { num_cols_P_offd++; P_marker[index] = 1; } } } if (num_cols_P_offd) col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while( P_marker[index] == 0) index++; col_map_offd_P[i] = index++; } for(i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) index++; col_map_offd_P[i] = fine_to_coarse_offd[index]; index++; } /* Sort the col_map_offd_P and P_offd_j correctly */ for(i = 0; i < num_cols_P_offd; i++) P_marker[i] = col_map_offd_P[i]; /* Check if sort actually changed anything */ if(hypre_ssort(col_map_offd_P,num_cols_P_offd)) { for(i = 0; i < P_offd_size; i++) for(j = 0; j < num_cols_P_offd; j++) if(P_marker[P_offd_j[i]] == col_map_offd_P[j]) { P_offd_j[i] = j; j = num_cols_P_offd; } } hypre_TFree(P_marker); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse); hypre_TFree(P_marker); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd); hypre_TFree(P_marker_offd); hypre_TFree(CF_marker_offd); hypre_TFree(tmp_CF_marker_offd); if(num_functions > 1) hypre_TFree(dof_func_offd); hypre_TFree(found); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } 
return hypre_error_flag; }
ast-dump-openmp-teams.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test() {
#pragma omp target
#pragma omp teams
  ;
}

// NOTE: The CHECK lines below pin the exact AST (including source line/column
// numbers) that clang emits for a `teams` region nested in a `target` region.
// Do not insert or move anything above the closing brace of test(), or the
// line references (line:3 .. line:7) in the patterns will no longer match.

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams.c:3:1, line:7:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:7:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:1, col:18>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:1, col:18>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-OMPTeamsDirective {{.*}} <col:1, col:18> openmp_structured_block
// CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:6:3>
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams.c:5:1) *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams.c:4:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <line:6:3> openmp_structured_block
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams.c:5:1) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-OMPTeamsDirective {{.*}} <line:5:1, col:18> openmp_structured_block
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:6:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams.c:5:1) *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:6:3> openmp_structured_block
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams.c:5:1) *const restrict'
util.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "base.h"
#include "thd_info.h"
#include "util.h"


/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/* Return a uniformly random value in [-3, 3] with a random sign. */
val_t rand_val(void)
{
  /* TODO: modify this to work based on the size of idx_t */
  val_t v =  3.0 * ((val_t) rand() / (val_t) RAND_MAX);
  if(rand() % 2 == 0) {
    v *= -1;
  }
  return v;
}


/* Return a random index built from two calls to rand(). */
idx_t rand_idx(void)
{
  /* TODO: modify this to work based on the size of idx_t */
  /* Cast BEFORE shifting: `rand() << 16` shifts a signed int and overflows
   * (undefined behavior) whenever rand() uses more than 15 bits. */
  return ((idx_t) rand() << 16) | (idx_t) rand();
}


/* Fill vals[0..nelems) with random values from rand_val(). */
void fill_rand(
  val_t * const restrict vals,
  idx_t const nelems)
{
  for(idx_t i=0; i < nelems; ++i) {
    vals[i] = rand_val();
  }
}


/* Format a byte count as a human-readable string ("1.50MB", ...).
 * Returns a heap string the caller must free, or NULL on failure. */
char * bytes_str(
  size_t const bytes)
{
  double size = (double)bytes;
  int suff = 0;
  const char *suffix[5] = {"B", "KB", "MB", "GB", "TB"};
  /* Stop at the LAST valid suffix index (4). The previous bound `suff < 5`
   * let suff reach 5 and read suffix[5] out of bounds for inputs >= 1024^5. */
  while(size > 1024 && suff < 4) {
    size /= 1024.;
    ++suff;
  }
  char * ret = NULL;
  if(asprintf(&ret, "%0.2f%s", size, suffix[suff]) == -1) {
    fprintf(stderr, "SPLATT: asprintf failed with %zu bytes.\n", bytes);
    ret = NULL;
  }
  return ret;
}


/* Return the index of the largest element of arr[0..N). Ties keep the
 * first occurrence. N must be >= 1. */
idx_t argmax_elem(
  idx_t const * const arr,
  idx_t const N)
{
  idx_t mkr = 0;
  for(idx_t i=1; i < N; ++i) {
    if(arr[i] > arr[mkr]) {
      mkr = i;
    }
  }
  return mkr;
}


/* Return the index of the smallest element of arr[0..N). Ties keep the
 * first occurrence. N must be >= 1. */
idx_t argmin_elem(
  idx_t const * const arr,
  idx_t const N)
{
  idx_t mkr = 0;
  for(idx_t i=1; i < N; ++i) {
    if(arr[i] < arr[mkr]) {
      mkr = i;
    }
  }
  return mkr;
}


/* Compute the prime factorization (with multiplicity) of N by trial
 * division. Returns a heap array of *nprimes factors; caller frees. */
int * get_primes(
  int N,
  int * nprimes)
{
  int size = 10;
  int * p = (int *) splatt_malloc(size * sizeof(int));
  int np = 0;
  while(N != 1) {
    int i;
    for(i=2; i <= N; ++i) {
      if(N % i == 0) {
        /* found the next prime */
        break;
      }
    }

    /* grow if necessary -- the original never updated `size` after the
     * realloc, so the buffer stopped growing and overflowed past 20
     * factors */
    if(size == np) {
      size *= 2;
      p = (int *) realloc(p, size * sizeof(int));
    }
    p[np++] = i;
    N /= i;
  }
  *nprimes = np;
  return p;
}


/* Copy `bytes` bytes from src to dst, splitting the range evenly across
 * the threads of an OpenMP parallel region. */
void par_memcpy(
  void * const restrict dst,
  void const * const restrict src,
  size_t const bytes)
{
  #pragma omp parallel
  {
    int nthreads = splatt_omp_get_num_threads();
    int tid = splatt_omp_get_thread_num();

    /* each thread copies a contiguous slice, clamped to the buffer end */
    size_t n_per_thread = (bytes + nthreads - 1)/nthreads;
    size_t n_begin = SS_MIN(n_per_thread * tid, bytes);
    size_t n_end = SS_MIN(n_begin + n_per_thread, bytes);

    memcpy((char *)dst + n_begin, (char *)src + n_begin, n_end - n_begin);
  }
}
GB_unaryop__lnot_fp64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_fp64_uint32
// op(A') function:  GB_tran__lnot_fp64_uint32

// C type:   double
// A type:   uint32_t
// cast:     double cij = (double) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij)   \
    double z = (double) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */          \
    GB_GETA (aij, Ax, pA) ;      \
    /* Cx [pC] = op (cast (aij)) */      \
    GB_CASTING (z, aij) ;        \
    GB_OP (GB_CX (pC), z) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_fp64_uint32
(
    double *Cx,         // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // elementwise and independent per entry, so a static schedule suffices
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_fp64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated by including the shared
    // template with the macros above defining the type-specific pieces
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
ccsd_pack.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <math.h> //#include <omp.h> #include "config.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" /* * a * v1 + b * v2.transpose(0,2,1,3) */ void CCmake_0213(double *out, double *v1, double *v2, int count, int m, double a, double b) { #pragma omp parallel default(none) \ shared(count, m, out, v1, v2, a, b) { int i, j, k, l, n; size_t d2 = m * m; size_t d1 = m * m * m; double *pv1, *pv2, *pout; #pragma omp for schedule (static) for (i = 0; i < count; i++) { for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++) { pout = out + d1*i + d2*j + m*k; pv1 = v1 + d1*i + d2*j + m*k; pv2 = v2 + d1*i + d2*k + m*j; for (l = 0; l < m; l++, n++) { pout[l] = pv1[l] * a + pv2[l] * b; } } } } } } /* * out = v1 + v2.transpose(0,2,1) */ void CCsum021(double *out, double *v1, double *v2, int count, int m) { #pragma omp parallel default(none) \ shared(count, m, out, v1, v2) { int i, j, k, n; size_t mm = m * m; double *pout, *pv1, *pv2; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + mm * i; pv1 = v1 + mm * i; pv2 = v2 + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++, n++) { pout[n] = pv1[n] + pv2[k*m+j]; } } } } } /* * g2 = a * v1 + b * v2.transpose(0,2,1) */ void CCmake_021(double *out, double *v1, double *v2, int count, int m, double a, double b) { if (a == 1 && 
b == 1) { return CCsum021(out, v1, v2, count, m); } #pragma omp parallel default(none) \ shared(count, m, out, v1, v2, a, b) { int i, j, k, n; size_t mm = m * m; double *pout, *pv1, *pv2; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + mm * i; pv1 = v1 + mm * i; pv2 = v2 + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++, n++) { pout[n] = pv1[n] * a + pv2[k*m+j] * b; } } } } } /* * if matrix B is symmetric for the contraction A_ij B_ij, * Tr(AB) ~ A_ii B_ii + (A_ij + A_ji) B_ij where i > j * This function extract the A_ii and the lower triangluar part of A_ij + A_ji */ void CCprecontract(double *out, double *in, int count, int m, double diagfac) { #pragma omp parallel default(none) \ shared(count, m, in, out, diagfac) { int i, j, k, n; size_t mm = m * m; size_t m2 = m * (m+1) / 2; double *pout, *pin; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + m2 * i; pin = in + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < j; k++, n++) { pout[n] = pin[j*m+k] + pin[k*m+j]; } pout[n] = pin[j*m+j] * diagfac; n++; } } } } /* * if i1 == j1: * eri = unpack_tril(eri, axis=0) * unpack_tril(eri).reshape(i1-i0,j1-j0,nao,nao).transpose(0,2,1,3) */ void CCload_eri(double *out, double *eri, int *orbs_slice, int nao) { int i0 = orbs_slice[0]; int i1 = orbs_slice[1]; int j0 = orbs_slice[2]; int j1 = orbs_slice[3]; size_t ni = i1 - i0; size_t nj = j1 - j0; size_t nn = nj * nao; size_t nao_pair = nao * (nao + 1) / 2; #pragma omp parallel default(none) \ shared(out, eri, i1, j1, ni, nj, nn, nao, nao_pair) { int i, j, k, l, ij; double *pout; double *buf = malloc(sizeof(double) * nao*nao); #pragma omp for schedule (static) for (ij = 0; ij < ni*nj; ij++) { i = ij / nj; j = ij % nj; NPdunpack_tril(nao, eri+ij*nao_pair, buf, 1); pout = out + (i*nn+j)*nao; for (k = 0; k < nao; k++) { for (l = 0; l < nao; l++) { pout[k*nn+l] = buf[k*nao+l]; } } } free(buf); } } /* * eri put virtual orbital first * [ v ] * [ v . 
] * [ v . . ] * [ o . . . ] * [ o . . . . ] */ void CCsd_sort_inplace(double *eri, int nocc, int nvir, int count) { #pragma omp parallel default(none) \ shared(eri, nocc, nvir, count) { int ic, i, j, ij; size_t nmo = nocc + nvir; size_t nmo_pair = nmo * (nmo+1) / 2; size_t nocc_pair = nocc * (nocc+1) /2; size_t nvir_pair = nvir * (nvir+1) /2; double *peri, *pout; double *buf = malloc(sizeof(double) * nocc*nvir); #pragma omp for schedule (static) for (ic = 0; ic < count; ic++) { peri = eri + ic*nmo_pair + nvir_pair; for (i = 0; i < nocc; i++, peri+=nvir+i) { for (j = 0; j < nvir; j++) { buf[i*nvir+j] = peri[j]; } } pout = eri + ic*nmo_pair + nvir_pair; peri = eri + ic*nmo_pair + nvir_pair + nvir; for (ij = 0, i = 0; i < nocc; i++, peri+=nvir+i) { for (j = 0; j <= i; j++, ij++) { pout[ij] = peri[j]; } } pout = eri + ic*nmo_pair + nvir_pair + nocc_pair; memcpy(pout, buf, sizeof(double)*nocc*nvir); } free(buf); } }
c55c7aec73df0f31d67fbe39510946453b899e1d.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"

/* Metadata wrapper for a multi-dimensional array handed in by the host
 * framework: `data` is the raw buffer; the int arrays describe per-axis
 * sizes/padding/offsets (only `size` is read in this function).
 * NOTE(review): this file looks auto-generated by the Devito DSL compiler
 * (the trailing "Backdoor edit" stamp is a Devito test artifact) — confirm
 * before editing by hand. */
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

/* Wall-clock accumulators (seconds) for the three code sections below. */
struct profiler
{
  double section0;
  double section1;
  double section2;
} ;

/* Forward time-stepping loop of a damped scalar wave-equation solver with
 * point sources and point receivers, offloaded to an accelerator via
 * OpenMP target directives.
 *
 * Parameters:
 *   damp_vec       damping field (indexed with a 1-point offset)
 *   dt             time step
 *   o_x/o_y/o_z    physical origin of the grid
 *   rec_vec        output receiver traces, rec[time][receiver]
 *   rec_coords_vec receiver coordinates (x,y,z per receiver)
 *   src_vec        source wavelet samples, src[time][source]
 *   src_coords_vec source coordinates (x,y,z per source)
 *   u_vec          wavefield with 3 time planes and a 12-point padding
 *                  on each spatial axis (all accesses use +12)
 *   vp_vec         velocity model
 *   *_M/*_m        inclusive upper/lower iteration bounds per dimension
 *   timers         per-section elapsed-time accumulators (host-side)
 *
 * Returns 0 always. */
int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)
{
  /* Re-type the flat data buffers as variable-length multi-dimensional
   * arrays so they can be indexed naturally below. */
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
  float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
  float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
  float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  /* Copy all inputs to the device up front; they stay resident for the
   * whole time loop. */
#pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target enter data map(to: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])

  /* Time loop with a rotating 3-plane buffer: t0 = current, t1 = next,
   * t2 = previous wavefield plane. */
  for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
  {
    struct timeval start_section0, end_section0;
    gettimeofday(&start_section0, NULL);
    /* Begin section0: wavefield update.  Second-order-in-time scheme;
     * the spatial term is a radius-6 star stencil (offsets +/-1..+/-6
     * around the +12-padded centre on each axis) scaled by vp^2*dt^2,
     * with damping applied through damp[]. */
#pragma omp target teams distribute parallel for collapse(3)
    for (int x = x_m; x <= x_M; x += 1)
    {
      for (int y = y_m; y <= y_M; y += 1)
      {
        for (int z = z_m; z <= z_M; z += 1)
        {
          float r0 = vp[x + 12][y + 12][z + 12]*vp[x + 12][y + 12][z + 12];
          u[t1][x + 12][y + 12][z + 12] = 2.0F*(5.0e-1F*r0*(dt*dt)*(-1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]) + 5.0e-1F*(r0*dt*damp[x + 1][y + 1][z + 1]*u[t0][x + 12][y + 12][z + 12] - u[t2][x + 12][y + 12][z + 12]) + 1.0F*u[t0][x + 12][y + 12][z + 12])/(r0*dt*damp[x + 1][y + 1][z + 1] + 1);
        }
      }
    }
    /* End section0 */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1: source injection.  Each source is spread onto the
     * 8 corners of its enclosing grid cell with trilinear weights
     * (px,py,pz are the in-cell offsets; the 5.0e-2 / 2.0e+1F constants
     * suggest a 20-unit grid spacing — TODO confirm).  Atomic updates
     * because sources may share cells. */
#pragma omp target teams distribute parallel for collapse(1)
    for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
    {
      /* _0/_1/_2: floor cell index per axis; _3/_4/_5: the +1 neighbours
       * (z, y, x respectively). */
      int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0]));
      int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1]));
      int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2]));
      int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1;
      int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1;
      int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1;
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]);
      /* corner (x0, y0, z0) */
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
      {
        float r1 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r1;
      }
      /* corner (x0, y0, z1) */
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
      {
        float r2 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r2;
      }
      /* corner (x0, y1, z0) */
      if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r3 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r3;
      }
      /* corner (x0, y1, z1) */
      if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r4 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r4;
      }
      /* corner (x1, y0, z0) */
      if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r5 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r5;
      }
      /* corner (x1, y0, z1) */
      if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r6 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r6;
      }
      /* corner (x1, y1, z0) */
      if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r7 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r7;
      }
      /* corner (x1, y1, z1) */
      if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r8 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12])*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r8;
      }
    }
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;

    struct timeval start_section2, end_section2;
    gettimeofday(&start_section2, NULL);
    /* Begin section2: receiver sampling.  Mirror of section1 — the
     * current wavefield u[t0] is trilinearly interpolated at each
     * receiver location and stored into rec[time][p_rec].  No atomics
     * needed: each iteration writes a distinct trace sample. */
#pragma omp target teams distribute parallel for collapse(1)
    for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
    {
      int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0]));
      int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1]));
      int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2]));
      int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1;
      int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1;
      int ii_rec_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1;
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]);
      float sum = 0.0F;
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += 1.25e-4F*px*py*pz*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      rec[time][p_rec] = sum;
    }
    /* End section2 */
    gettimeofday(&end_section2, NULL);
    timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
  }

  /* Copy results back to the host and release device storage; inputs
   * that were never modified are simply deleted. */
#pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
  return 0;
}
/* Backdoor edit at Wed Mar 4 19:26:45 2020*/
mcrat.c
/* # Program to run a Monte Carlo radiation transfer through the 2D # simulations of GRB jets. # # Python code written by D. Lazzati at Oregonstate, C code written by Tyler Parsotan @ Oregon State # ver 0.1 July 8, 2015 # ver 1.1 July 20, 2015: added record of number of scatterings, included # all terms in weight. Should now give correct light curves. # ver 1.2 July 21, 2015: added parameter file to keep track of input # params of each simulation # ver 2.0 July 22, 2015: corrected the problem that arises when there is # no scattering in the time span of one frame. Fixed output arrays dimension. # ver 2.1 July 25, 2015: fixed bug that did not make the number of # scattering grow with the number of photons. # ver 3.0 July 28, 2015: using scipy nearest neighbor interpolation to # speed things up. Gained about factor 2 # ver 3.1 July 29, 2015: added radial spread of photon injection points # ver 3.2 July 31, 2015: added Gamma to the weight of photons!!! # ver 4.0 Aug 5, 2015: try to speed up by inverting cycle # ver 4.1 Aug 8, 2015: add spherical test as an option # ver 4.2 Aug 9, 2015: saving files appending rather than re-writing # ver 4.3 Aug 11, 2015: corrected error in the calculation of the local temperature # ver 4.4 Aug 13, 2015: added cylindrical test # ver 4.5 Aug 18, 2015: fixd various problems pointed by the cylindrical test # ver 4.6 Aug 21, 2015: corrected mean free path for large radii # ver 5.0 Aug 25, 2015: corrected problem with high-T electrons and excess scatterings # ver 5.1 Aug 25, 2015: cleaned-up coding # ver 5.2 Sept 3, 2015: fixed problem with number of scatterings for multiple injections * * ver 6.0 Dec 28, 2016: rewrote the code in C, added checkpoint file so if the code is interrupted all the progress wont be lost, made the code only need to be compiled once for a given MC_XXX directory path so you just need to supply the sub directory of MC_XXX as a command line argument * version 7.0 used OpenMP to parallelize the code by angle and the 
function findminmfp() version 8.0 added 3D capabilities for RIKEN hydro data and 2D capablities for RIKEN 2D hydro data and made it more efficient with grid selection to speed it up * Version 9.0 late 2017 included full Klein Nishina Cross Section and polarization with stokes parameters * Version 9.1 late 2018 including synchrotron absorption and emission */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <dirent.h> #include <math.h> #include <gsl/gsl_rng.h> #include "mclib.h" //#include "mclib_3d.h" //#include "mclib_pluto.h" #include <omp.h> #include "mpi.h" int main(int argc, char **argv) { //compile each time a macro is changed, have to supply the subfolder within the MC_PATH directory as a command line argument to the C program eg. MCRAT 1/ // Define variables char flash_prefix[200]=""; char mc_file[200]="" ; char spect;//type of spectrum char restrt;//restart or not double fps, fps_modified, theta_jmin, theta_jmax,hydro_domain_y, hydro_domain_x ;//frames per second of sim, min opening angle of jet, max opening angle of jet in radians, max y value in hydro domain double inj_radius_small, inj_radius_large, ph_weight_suggest, ph_weight_small, ph_weight_large ;//radius at chich photons are injected into sim int frm0,last_frm, frm2_small, frm2_large, j=0, min_photons, max_photons, frm0_small, frm0_large ;//frame starting from, last frame of sim, frame of last injection int dim_switch=0; int find_nearest_grid_switch=0; int increment_inj=1, increment_scatt=1; //increments for injection loop and scattering loop, outer and inner loops respectively, the increment can change for RIKEN 3D hydro files double inj_radius; int frm2,save_chkpt_success=0; char mc_filename[200]=""; char mc_filename_2[200]=""; char mc_operation[200]=""; char mc_dir[200]="" ; int file_count = 0; DIR * dirp; struct dirent * entry; struct stat st = {0}; double theta_jmin_thread=0, theta_jmax_thread=0; char 
flash_file[200]=""; char log_file[200]=""; FILE *fPtr=NULL; //pointer to log file for each thread double *xPtr=NULL, *yPtr=NULL, *rPtr=NULL, *thetaPtr=NULL, *velxPtr=NULL, *velyPtr=NULL, *densPtr=NULL, *presPtr=NULL, *gammaPtr=NULL, *dens_labPtr=NULL; double *szxPtr=NULL,*szyPtr=NULL, *tempPtr=NULL; //pointers to hold data from FLASH files double *phiPtr=NULL, *velzPtr=NULL, *zPtr=NULL, *all_time_steps=NULL ; int num_ph=0, scatt_synch_num_ph=0, num_null_ph=0, array_num=0, ph_scatt_index=0, num_photons_find_new_element=0, max_scatt=0, min_scatt=0,i=0; //number of photons produced in injection algorithm, number of array elleemnts from reading FLASH file, index of photon whch does scattering, generic counter double dt_max=0, thescatt=0, accum_time=0; double gamma_infinity=0, time_now=0, time_step=0, avg_scatt=0,avg_r=0; //gamma_infinity not used? double ph_dens_labPtr=0, ph_vxPtr=0, ph_vyPtr=0, ph_tempPtr=0, ph_vzPtr=0;// *ph_cosanglePtr=NULL ; double min_r=0, max_r=0, min_theta=0, max_theta=0, nu_c_scatt=0, n_comptonized=0; int frame=0, scatt_frame=0, frame_scatt_cnt=0, frame_abs_cnt=0, scatt_framestart=0, framestart=0; struct photon *phPtr=NULL; //pointer to array of photons int angle_count=0, num_ph_emit=0; int num_angles=0, old_num_angle_procs=0; //old_num_angle_procs is to hold the old number of procs in each angle when cont sims, if restarting sims this gets set to angle_procs int *frame_array=NULL, *proc_frame_array=NULL, *element_num=NULL, *sorted_indexes=NULL, proc_frame_size=0; double *thread_theta=NULL; //saves ranges of thetas for each thread to go through double delta_theta=1; int myid, numprocs, angle_procs, angle_id, procs_per_angle; int temporary[3]={0}, tempo=0; //new OpenMPI stuff MPI_Init(NULL,NULL); MPI_Comm_size(MPI_COMM_WORLD, &numprocs); MPI_Comm_rank(MPI_COMM_WORLD, &myid); //new muliple threads injecting and propagating photons const gsl_rng_type *rng_t; gsl_rng *rng; gsl_rng_env_setup(); rng_t = gsl_rng_ranlxs0; rng = gsl_rng_alloc (rng_t); 
//initalize random number generator to seed the others with random numbers //want to break up simulation by angle and injection frame & have each thread save data in its own folder //have each thread check if its directory is made and if its restarting (delete evrything) or if its continuing with a previous simulation //the angle and the injection frames will be the names of mc_dir, therefore read mc.par first in MC_XXX directory //make strings of proper directories etc. snprintf(flash_prefix,sizeof(flash_prefix),"%s%s",FILEPATH,FILEROOT ); snprintf(mc_file,sizeof(flash_prefix),"%s%s%s",FILEPATH, MC_PATH,MCPAR); printf(">> MCRaT: Reading mc.par: %s\n", mc_file); readMcPar(mc_file, &hydro_domain_x, &hydro_domain_y, &fps, &theta_jmin, &theta_jmax, &delta_theta, &inj_radius_small,&inj_radius_large, &frm0_small,&frm0_large, &last_frm ,&frm2_small, &frm2_large, &ph_weight_small, &ph_weight_large, &min_photons, &max_photons, &spect, &restrt); //thetas that comes out is in degrees //printf("%c\n", restrt); //divide up angles and frame injections among threads DONT WANT NUMBER OF THREADS TO BE ODD //assign ranges to array that hold them //leave angles in degrees here num_angles=(int) (((theta_jmax-theta_jmin)/delta_theta)) ;//*(180/M_PI)); thread_theta=malloc( num_angles *sizeof(double) ); *(thread_theta+0)=theta_jmin;//*(180/M_PI); //printf("%e\n", *(thread_theta+0)); for (j=1;j<(num_angles); j++) { *(thread_theta+j)=*(thread_theta+(j-1))+delta_theta; //printf("%e\n", *(thread_theta+j)); } //make comm without the procs that deal with angle //comm for angles procs_per_angle= numprocs/num_angles; //printf("%d\n", procs_per_angle); MPI_Comm angle_comm; if (restrt=='r') //uncomment this when I run MCRAT for sims that didnt originally save angle_procs { MPI_Comm_split(MPI_COMM_WORLD, myid/procs_per_angle , myid, &angle_comm); MPI_Comm_rank(angle_comm, &angle_id); MPI_Comm_size(angle_comm, &angle_procs); //printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d\n", myid, 
numprocs, angle_id, angle_procs); theta_jmin_thread= (*(thread_theta+ (myid/procs_per_angle))) *(M_PI/180); theta_jmax_thread= theta_jmin_thread+(delta_theta*(M_PI/180)); snprintf(mc_dir,sizeof(flash_prefix),"%s%s%0.1lf-%0.1lf/",FILEPATH,MC_PATH, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI ); //have to add angle into this old_num_angle_procs=angle_procs; } else { MPI_Group sub_world_group; MPI_Comm sub_world_comm; int incl_procs[procs_per_angle*num_angles], count, sub_world_id; int total_num_to_restart=0; int color=1; int *all_cont_process_idPtr=NULL, *each_num_to_restart_per_anglePtr=NULL, *tmp=NULL; //for restart='c' case if the number of processes isnt a multiple of procs_per_angle*num_angles make a comm out of those that are in order to analyze files and count number of processes for each angle range need to con't count=0; for (j=0;j<numprocs;j++) { if (j<procs_per_angle*num_angles) { incl_procs[count]=j; count++; } } if (myid<procs_per_angle*num_angles) { int myid_2=0; // Get the group of processes in MPI_COMM_WORLD and make a sub group to go through checkpoint files MPI_Group world_group; MPI_Comm root_angle_comm; MPI_Comm_group(MPI_COMM_WORLD, &world_group); MPI_Group_incl(world_group, procs_per_angle*num_angles, incl_procs, &sub_world_group); MPI_Comm_create_group(MPI_COMM_WORLD, sub_world_group, 0, &sub_world_comm); MPI_Comm_rank(sub_world_comm, &myid_2); MPI_Comm_split(sub_world_comm, myid_2/procs_per_angle , myid_2, &angle_comm); MPI_Comm_rank(angle_comm, &angle_id); MPI_Comm_size(angle_comm, &angle_procs); //create group of all the processes that have angle_id==0 if (angle_id==0) { color=0; //set different color for root processes in each group of angle_comm } MPI_Comm_split(sub_world_comm, color , myid_2, &root_angle_comm); //create comm to exchange info about number of processes to restart for each angle range printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d\n", myid, numprocs, angle_id, angle_procs); theta_jmin_thread= 
(*(thread_theta+ (myid_2/procs_per_angle))) *(M_PI/180); theta_jmax_thread= theta_jmin_thread+(delta_theta*(M_PI/180)); snprintf(mc_dir,sizeof(flash_prefix),"%s%s%0.1lf-%0.1lf/",FILEPATH,MC_PATH, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI ); //have to add angle into this //call the function to count the num of processes for each angle range that need to be con't int count_cont_procs=0, total_cont_procs_angle=0, global_cont_procs=0; int *cont_proc_idsPtr=NULL, *total_cont_procs_angle_Ptr=NULL, *displPtr=NULL; //becomes the size of the number of old procceses int *cont_proc_ids_anglePtr=NULL; old_num_angle_procs=getOrigNumProcesses(&count_cont_procs, &cont_proc_idsPtr, mc_dir, angle_id, angle_procs, last_frm); //count_cont_procs=1;//just for testing purposes if (old_num_angle_procs==-1) { printf("MCRAT wasnt able to get a value of old_num_angle_procs to continue the simulation. Now exiting to prevent data corruption.\n" ); MPI_Abort(MPI_COMM_WORLD, 1); } total_cont_procs_angle_Ptr=malloc(angle_procs*sizeof(int)); displPtr=malloc(angle_procs*sizeof(int)); MPI_Gather(&count_cont_procs,1,MPI_INT, total_cont_procs_angle_Ptr, 1, MPI_INT, 0,angle_comm );//hold the number of elements that each process will send the root process MPI_Barrier(angle_comm); MPI_Barrier(sub_world_comm); //if (angle_id==0) //{ // printf("Angle_procs: %d 1st gather: %d, %d, %d\n", angle_procs, *(total_cont_procs_angle_Ptr), *(total_cont_procs_angle_Ptr+1), *(total_cont_procs_angle_Ptr+2)); //} MPI_Reduce(&count_cont_procs, &total_cont_procs_angle, 1, MPI_INT, MPI_SUM, 0, angle_comm); //for each angle sum the number of procs to continue and pass it to the root for angle_comm cont_proc_ids_anglePtr=malloc(total_cont_procs_angle*sizeof(int)); //each root proc in angle comm has to hold the id's of the old set of processes to cont *(displPtr+0)=0; if (angle_id==0) { for (j=1;j<angle_procs;j++) { *(displPtr+j)=(*(displPtr+j-1))+(*(total_cont_procs_angle_Ptr+j-1 )); //set the displacement for 
each proces to put its vector of pprocess IDs that need to be continued printf("Displacement: %d\n", *(displPtr+j)); } } MPI_Gatherv(cont_proc_idsPtr,count_cont_procs,MPI_INT, cont_proc_ids_anglePtr, total_cont_procs_angle_Ptr, displPtr , MPI_INT, 0,angle_comm ); //send the vectors with the ids of the old processes that need to be cont to root in angle_comm MPI_Barrier(angle_comm); MPI_Barrier(sub_world_comm); if (angle_id==0) { printf("Total Cont Procs: %d\n", total_cont_procs_angle); for (j=0;j<total_cont_procs_angle;j++) { { printf("Number: %d ID: %d\n",j, *(cont_proc_ids_anglePtr+j)); } } } //each root for angle_comm has the number of processes each angle range needs to restart and the array of what the IDs of those processes used to be //now have to combine all that info for rank 0 in MPI_COMM_WORLD and then end it to all processes in MPI_COMM_WORLD //if (myid==0) { free(displPtr); displPtr=NULL; //initalize variables to hold all data each_num_to_restart_per_anglePtr=malloc(num_angles*sizeof(int)); displPtr=malloc(num_angles*sizeof(int)); *(displPtr+0)=0; } MPI_Barrier(angle_comm); MPI_Barrier(sub_world_comm); if (angle_id==0) { //this is the part where all the root processes of angle_comm transfer thier info to the root proc of MPI_WORLD MPI_Reduce(&total_cont_procs_angle, &total_num_to_restart, 1, MPI_INT, MPI_SUM, 0, root_angle_comm); //for each angle sum the number of procs to continue and pass it to the root for MPI_COMM_WORLD MPI_Gather(&total_cont_procs_angle,1,MPI_INT, each_num_to_restart_per_anglePtr, 1, MPI_INT, 0,root_angle_comm );//hold the number of elements that each process sent the root for MPI_COMM_WORLD if (myid==0) { for (j=1;j<num_angles;j++) { *(displPtr+j)=(*(displPtr+j-1))+(*(each_num_to_restart_per_anglePtr+j-1 )); //set the displacement for each proces to put its vector of pprocess IDs that need to be continued } } all_cont_process_idPtr=malloc(total_num_to_restart*sizeof(int)); MPI_Gatherv(cont_proc_ids_anglePtr, 
total_cont_procs_angle, MPI_INT, all_cont_process_idPtr, each_num_to_restart_per_anglePtr, displPtr, MPI_INT, 0, root_angle_comm); } MPI_Barrier(angle_comm); MPI_Barrier(sub_world_comm); if (myid==0) { printf("Global Cont Procs: %d\n", total_num_to_restart); for (j=0;j<total_num_to_restart;j++) { { printf("Global ID: %d\n", *(all_cont_process_idPtr+j)); } } } //destroy the old comms MPI_Barrier(angle_comm); MPI_Barrier(sub_world_comm); //destroy current angle comm and recreate a new one MPI_Comm_free(&root_angle_comm); MPI_Comm_free(&angle_comm); MPI_Comm_free(&sub_world_comm); MPI_Group_free(&sub_world_group); MPI_Group_free(&world_group); free(cont_proc_idsPtr); free(cont_proc_ids_anglePtr); free(total_cont_procs_angle_Ptr); free(displPtr); //free(each_num_to_restart_per_anglePtr); //free(all_cont_process_idPtr); } //send all of myid==0 data to all processes in MPI_COMM_WORLD MPI_Bcast( &total_num_to_restart, 1, MPI_INT, 0, MPI_COMM_WORLD ); if (total_num_to_restart>0) { if (myid != 0 ) { printf("Proc: %d, Global Cont Procs: %d\n", myid, total_num_to_restart); //allocate data of appropriate size for all processes to hold the data from MPI_Bcast tmp=realloc(all_cont_process_idPtr,total_num_to_restart *sizeof(int)); if (tmp!=NULL) { all_cont_process_idPtr=tmp; } else { printf("Error with reserving space to hold data about restarting process ID's\n"); } //free(tmp); printf("Proc: %d, Num_angles: %d\n", myid, num_angles); tmp=realloc(each_num_to_restart_per_anglePtr, num_angles*sizeof(int)); if (tmp!=NULL) { each_num_to_restart_per_anglePtr=tmp; } else { printf("Error with reserving space to hold data about restarting process numbers for each angle range\n"); } //free(tmp); } MPI_Barrier(MPI_COMM_WORLD); MPI_Bcast( all_cont_process_idPtr, total_num_to_restart, MPI_INT, 0, MPI_COMM_WORLD ); MPI_Bcast( each_num_to_restart_per_anglePtr, num_angles, MPI_INT, 0, MPI_COMM_WORLD ); MPI_Bcast( &old_num_angle_procs, 1, MPI_INT, 0, MPI_COMM_WORLD ); 
MPI_Barrier(MPI_COMM_WORLD); if (myid==numprocs-1) { printf("Number of processes: %d\n", old_num_angle_procs); //printf("restarting process numbers for each angle range: %d, %d, %d\n", *(each_num_to_restart_per_anglePtr), *(each_num_to_restart_per_anglePtr+1), *(each_num_to_restart_per_anglePtr+2)); } //assign proper number of processes to each angle range to con't sims and then reset angle_id to original value from when simulation was first started color=0; //by default all processes have this value count=0; for (j=0;j<num_angles;j++) { if (myid>=count && myid<count+(*(each_num_to_restart_per_anglePtr+j)) ) { color=j; } count+=(*(each_num_to_restart_per_anglePtr+j)); printf("Myid: %d, Color: %d, Count %d, Num To Start Per Angle: %d\n", myid, color, count, (*(each_num_to_restart_per_anglePtr+j))); } if (count!=numprocs) { //if the number of processes needed to continue the simulation is different from the number of processes in the mpiexec call exit printf("The simulation needs %d processes to properly continue. 
The number of processes initialized was %d.\nThe program is now exiting to prevent data corruption\n.", count, numprocs); exit(2); } MPI_Comm_split(MPI_COMM_WORLD, color , myid, &angle_comm); MPI_Comm_rank(angle_comm, &angle_id); MPI_Comm_size(angle_comm, &angle_procs); printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d\n", myid, numprocs, angle_id, angle_procs); angle_procs=old_num_angle_procs; //reset the angle for each process theta_jmin_thread= (*(thread_theta+ color)) *(M_PI/180); theta_jmax_thread= theta_jmin_thread+(delta_theta*(M_PI/180)); //reset the angle_id for each process count=0; for (j=0;j<color;j++) { count+=(*(each_num_to_restart_per_anglePtr+j)); } angle_id=(*(all_cont_process_idPtr+count+angle_id)); snprintf(mc_dir,sizeof(flash_prefix),"%s%s%0.1lf-%0.1lf/",FILEPATH,MC_PATH, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI ); //have to add angle into this } else { //if there are no more processes to continue just break up processes normally so they read in checkpoint files of completed processes and jump to merging files MPI_Comm_split(MPI_COMM_WORLD, myid/procs_per_angle , myid, &angle_comm); MPI_Comm_rank(angle_comm, &angle_id); MPI_Comm_size(angle_comm, &angle_procs); } free(all_cont_process_idPtr); free(each_num_to_restart_per_anglePtr); } MPI_Barrier(MPI_COMM_WORLD); if ((theta_jmin_thread >= 0) && (theta_jmax_thread <= (2*M_PI/180) )) //if within small angle (0-2 degrees) use _small inj_radius and frm2 have to think about this for larger domains { inj_radius=inj_radius_small; frm2=frm2_small; frm0=frm0_small; ph_weight_suggest=ph_weight_small; } else { inj_radius=inj_radius_large; frm2=frm2_large; frm0=frm0_large; ph_weight_suggest=ph_weight_large; } //make vector to hold the frames we are injecting in, vector should have (frm2-frm0)/angle_procs slots, if fps is const //angle_procs=1;//just for testing purposes proc_frame_size=ceil((frm2-frm0)/ (float) angle_procs); frame_array=malloc(((frm2-frm0)+1)*sizeof(int)); for 
(j=0;j<((frm2-frm0)+1); j++) { *(frame_array+j)=frm0+j ; //printf("proc: %d frame: %d\n", angle_id, *(frame_array+j)); } //set this now incase there is no checkpoint file, then this wont be overwritten and the corretc values will be passed even if the user decides to restart framestart=(*(frame_array +(angle_id*proc_frame_size))); scatt_framestart=framestart; if (angle_id != (angle_procs-1)) { frm2=(*(frame_array +((angle_id*proc_frame_size) + proc_frame_size-1) )); //section off blocks of the frame_array to give to each angle_id } else { frm2=(*(frame_array + (frm2-frm0) )); //if angle_id is last give it the last set, even if its uneven } if (restrt=='c') { printf(">> mc.py: Reading checkpoint\n"); //#pragma omp critical scatt_synch_num_ph=readCheckpoint(mc_dir, &phPtr, &frm2, &framestart, &scatt_framestart, &num_ph, &restrt, &time_now, angle_id, &angle_procs); /* for (i=0;i<num_ph;i++) { printf("%e,%e,%e, %e,%e,%e, %e, %e\n",(phPtr+i)->p0, (phPtr+i)->p1, (phPtr+i)->p2, (phPtr+i)->p3, (phPtr+i)->r0, (phPtr+i)->r1, (phPtr+i)->r2, (phPtr+i)->num_scatt ); } */ if (restrt=='c') { printf(">> Rank %d: Starting from photons injected at frame: %d out of %d\n", angle_id,framestart, frm2); printf(">> Rank %d with angles %0.1lf-%0.1lf: Continuing scattering %d photons from frame: %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,num_ph, scatt_framestart); printf(">> Rank %d with angles %0.1lf-%0.1lf: The time now is: %e\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,time_now); } else { printf(">> Rank %d with angles %0.1lf-%0.1lf: Continuing simulation by injecting photons at frame: %d out of %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,framestart, frm2); //starting with new photon injection is same as restarting sim } } else if ((stat(mc_dir, &st) == -1) && (restrt=='r')) { mkdir(mc_dir, 0777); //make the directory with full permissions } else { if (angle_id==0) { printf(">> proc %d with angles 
%0.1lf-%0.1lf: Cleaning directory \n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI); dirp = opendir(mc_dir); while ((entry = readdir(dirp)) != NULL) { if (entry->d_type == DT_REG) { /* If the entry is a regular file */ file_count++; //count how many files are in dorectory } } printf("File count %d\n", file_count); //file_count=0; if (file_count>0) { snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mc_proc_*"); //prepares string to remove *.dat in mc_dir system(mc_operation); snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mcdata_PW_*"); //prepares string to remove *.dat in mc_dir system(mc_operation); snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mcdata_PW*"); //prepares string to remove *.dat in mc_dir system(mc_operation); snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mc_chkpt_*.dat"); //prepares string to remove *.dat in mc_dir system(mc_operation); snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mc_output_*.log"); //prepares string to remove *.log in mc_dir system(mc_operation); } } } #if SIM_SWITCH == RIKEN && DIMENSIONS == 3 if (framestart>=3000) { increment_inj=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1 fps_modified=1; //therefore dt between files become 1 second } #else { increment_inj=1; fps_modified=fps; } #endif dt_max=1.0/fps_modified; MPI_Barrier(angle_comm); snprintf(log_file,sizeof(log_file),"%s%s%d%s",mc_dir,"mc_output_", angle_id,".log" ); printf("%s\n",log_file); fPtr=fopen(log_file, "a"); printf( "Im Proc %d with angles %0.1lf-%0.1lf proc_frame_size is %d Starting on Frame: %d Injecting until %d scatt_framestart: %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, proc_frame_size, framestart, frm2, scatt_framestart); fprintf(fPtr, "Im Proc %d with angles %0.1lf-%0.1lf Starting on Frame: %d scatt_framestart: %d\n", angle_id, 
theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, framestart, scatt_framestart); fflush(fPtr); free(frame_array); //for a checkpoint implementation, start from the last saved "frame" value and go to the saved "frm2" value //#pragma omp for for (frame=framestart;frame<=frm2;frame=frame+increment_inj) { #if SIM_SWITCH == RIKEN && DIMENSIONS == 3 if (frame>=3000) { increment_inj=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1 fps_modified=1; //therefore dt between files become 1 second } #else { increment_inj=1; fps_modified=fps; } #endif if (restrt=='r') { time_now=frame/fps; //for a checkpoint implmentation, load the saved "time_now" value when reading the ckeckpoint file otherwise calculate it normally } //printf(">> mc.py: Working on Frame %d\n", frame); fprintf(fPtr,"Im Proc: %d with angles %0.1lf - %0.1lf Working on Frame: %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frame); fflush(fPtr); if (restrt=='r') { //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { //if (strcmp(flash_sim, this_sim)==0) #if SIM_SWITCH == FLASH //{ //if using FLASH data for 2D //put proper number at the end of the flash file modifyFlashName(flash_file, flash_prefix, frame); fprintf(fPtr,">> Im Proc: %d with angles %0.1lf-%0.1lf: Opening FLASH file %s\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, flash_file); fflush(fPtr); readAndDecimate(flash_file, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\ &thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, min_theta, max_theta, fPtr); //} //else if (strcmp(pluto_amr_sim, this_sim)==0) #elif SIM_SWITCH == PLUTO_CHOMBO //{ modifyPlutoName(flash_file, flash_prefix, frame); fprintf(fPtr,">> Im Proc: %d with angles %0.1lf-%0.1lf: Opening PLUTO file %s\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, flash_file); fflush(fPtr); 
readPlutoChombo(flash_file, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\ &thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, min_theta, max_theta, fPtr); //exit(0); //} #else //{ //if using RIKEN hydro data for 2D szx becomes delta r szy becomes delta theta readHydro2D(FILEPATH, frame, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\ &thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, fPtr); //fprintf(fPtr, "%d\n\n", array_num); //} #endif fprintf(fPtr, "Number of Hydro Elements %d\n", array_num); //exit(0); } #else { fprintf(fPtr,">> Im Proc: %d with angles %0.1lf-%0.1lf\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI); fflush(fPtr); read_hydro(FILEPATH, frame, inj_radius, &xPtr, &yPtr, &zPtr, &szxPtr, &szyPtr, &rPtr,\ &thetaPtr, &phiPtr, &velxPtr, &velyPtr, &velzPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, fps_modified, fPtr); } #endif //check for run type //if(strcmp(cyl, this_run)==0) #if SIMULATION_TYPE == CYLINDRICAL_OUTFLOW { //printf("In cylindrical prep\n"); cylindricalPrep(gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num); } //else if (strcmp(sph, this_run)==0) #elif SIMULATION_TYPE == SPHERICAL_OUTFLOW { //printf("In Spherical\n"); sphericalPrep(rPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num , fPtr); } //else if (strcmp(struct_sph, this_run)==0) #elif SIMULATION_TYPE == STRUCTURED_SPHERICAL_OUTFLOW { //printf("In Structural Spherical\n"); structuredFireballPrep(rPtr, thetaPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num , fPtr); } #endif //determine where to place photons and how many should go in a given place //for a checkpoint implmentation, dont need to inject photons, need to load photons' last saved data 
fprintf(fPtr,">> Proc: %d with angles %0.1lf-%0.1lf: Injecting photons\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI); fflush(fPtr); //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { photonInjection(&phPtr, &num_ph, inj_radius, ph_weight_suggest, min_photons, max_photons,spect, array_num, fps_modified, theta_jmin_thread, theta_jmax_thread, xPtr, yPtr, szxPtr, szyPtr,rPtr,thetaPtr, tempPtr, velxPtr, velyPtr,rng, fPtr ); } #else { photonInjection3D(&phPtr, &num_ph, inj_radius, ph_weight_suggest, min_photons, max_photons,spect, array_num, fps_modified, theta_jmin_thread, theta_jmax_thread, xPtr, yPtr, zPtr, szxPtr, szyPtr,rPtr,thetaPtr, phiPtr, tempPtr, velxPtr, velyPtr, velzPtr, rng, fPtr); } #endif //printf("This many Photons: %d\n",num_ph); //num_ph is one more photon than i actually have //for (i=0;i<num_ph;i++) // printf("%e,%e,%e \n",(phPtr+i)->r0, (phPtr+i)->r1, (phPtr+i)->r2 ); } //scatter photons all the way thoughout the jet //for a checkpoint implmentation, start from the last saved "scatt_frame" value eh start_frame=frame or start_frame=cont_frame if (restrt=='r') { scatt_framestart=frame; //have to make sure that once the inner loop is done and the outer loop is incrememnted by one the inner loop starts at that new value and not the one read by readCheckpoint() } num_null_ph=0; for (scatt_frame=scatt_framestart;scatt_frame<=last_frm;scatt_frame=scatt_frame+increment_scatt) { #if SIM_SWITCH == RIKEN && DIMENSIONS == 3 if (scatt_frame>=3000) { increment_scatt=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1 fps_modified=1; //therefore dt between files become 1 second } #else { increment_scatt=1; fps_modified=fps; } #endif dt_max=1.0/fps_modified; //if working with RIKEN files and scatt_frame>=3000 dt is 1 second between each subsequent frame fprintf(fPtr,">>\n"); fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: Working on photons injected at frame: %d out of %d\n", 
angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,frame, frm2); #if SIMULATION_TYPE == SCIENCE fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: Simulation type Science - Working on frame %d\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, scatt_frame); #elif SIMULATION_TYPE == SPHERICAL_OUTFLOW fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: Simulation type Spherical Outflow - Working on frame %d\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, scatt_frame); #elif SIMULATION_TYPE == CYLINDRICAL_OUTFLOW fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: Simulation type Cylindrical Outflow - Working on frame %d\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, scatt_frame); #elif SIMULATION_TYPE == STRUCTURED_SPHERICAL_OUTFLOW fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: Simulation type Structured Spherical Outflow - Working on frame %d\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, scatt_frame); #endif fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: Opening file...\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI); fflush(fPtr); //set new seed to increase randomness? 
gsl_rng_set(rng, gsl_rng_get(rng)); //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { //if (strcmp(flash_sim, this_sim)==0) #if SIM_SWITCH == FLASH { //if using FLASH data for 2D //put proper number at the end of the flash file modifyFlashName(flash_file, flash_prefix, scatt_frame); phMinMax(phPtr, num_ph, &min_r, &max_r, &min_theta, &max_theta, fPtr); fprintf(fPtr,">> Im Proc: %d with angles %0.1lf-%0.1lf: Opening FLASH file %s\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, flash_file); fflush(fPtr); if ((scatt_frame != scatt_framestart) || (restrt=='c')) { //NEED TO DETERMINE IF min_r or max_r is smaller/larger than the rmin/rmax in photonEmitSynch to properly emit photons in the range that the process is interested in //printf("OLD: min_r %e max_r %e\n", min_r, max_r); double test=0; test=calcSynchRLimits( scatt_frame, frame, fps_modified, inj_radius, "min"); //printf("TEST MIN: %e\n", test); min_r=(min_r < test) ? min_r : test ; test=calcSynchRLimits( scatt_frame, frame, fps_modified, inj_radius, "max"); //printf("TEST MAX: %e\n", test); max_r=(max_r > test ) ? 
max_r : test ; //printf("NEW: min_r %e max_r %e\n", min_r, max_r); } readAndDecimate(flash_file, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\ &thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, min_theta, max_theta, fPtr); } //else if (strcmp(pluto_amr_sim, this_sim)==0) #elif SIM_SWITCH == PLUTO_CHOMBO { modifyPlutoName(flash_file, flash_prefix, scatt_frame); fprintf(fPtr,">> Im Proc: %d with angles %0.1lf-%0.1lf: Opening PLUTO file %s\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, flash_file); fflush(fPtr); phMinMax(phPtr, num_ph, &min_r, &max_r, &min_theta, &max_theta, fPtr); readPlutoChombo(flash_file, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\ &thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, min_theta, max_theta, fPtr); //exit(0); } #else { phMinMax(phPtr, num_ph, &min_r, &max_r, &min_theta, &max_theta, fPtr); //if using RIKEN hydro data for 2D szx becomes delta r szy becomes delta theta readHydro2D(FILEPATH, scatt_frame, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\ &thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, fPtr); //fprintf(fPtr, "%d\n\n", array_num); } #endif //fprintf(fPtr, "Number of Hydo Elements %d\n", array_num); //exit(0); } #else { phMinMax(phPtr, num_ph, &min_r, &max_r, &min_theta, &max_theta, fPtr); fprintf(fPtr,">> Im Proc: %d with angles %0.1lf-%0.1lf\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI); fflush(fPtr); read_hydro(FILEPATH, frame, inj_radius, &xPtr, &yPtr, &zPtr, &szxPtr, &szyPtr, &rPtr,\ &thetaPtr, &phiPtr, &velxPtr, &velyPtr, &velzPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, fps_modified, fPtr); } #endif fprintf(fPtr, "Number of Hydo Elements %d\n", array_num); //check for run type 
//if(strcmp(cyl, this_run)==0) #if SIMULATION_TYPE == CYLINDRICAL_OUTFLOW { //printf("In cylindrical prep\n"); cylindricalPrep(gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num); } //else if (strcmp(sph, this_run)==0) #elif SIMULATION_TYPE == SPHERICAL_OUTFLOW { sphericalPrep(rPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num, fPtr ); } //else if (strcmp(struct_sph, this_run)==0) #elif SIMULATION_TYPE == STRUCTURED_SPHERICAL_OUTFLOW { //printf("In Structural Spherical\n"); structuredFireballPrep(rPtr, thetaPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num , fPtr); } #endif //printf("The result of read and decimate are arrays with %d elements\n", array_num); //emit synchrotron photons here num_ph_emit=0; //by default want to allocat ememory for time_steps and sorted indexes to scatter all_time_steps=malloc(num_ph*sizeof(double)); sorted_indexes=malloc(num_ph*sizeof(int)); #if SYNCHROTRON_SWITCH == ON if ((scatt_frame != scatt_framestart) || (restrt=='c')) //remember to revert back to != { //if injecting synch photons, emit them if continuing simulation from a point where scatt_frame != scatt_framestart //if necessary, then add memory to then arrays allocated directly above //printf("(phPtr)[0].p0 %e (phPtr)[71].p0 %e\n", (phPtr)[0].p0, (phPtr)[71].p0); fprintf(fPtr, "Emitting Synchrotron Photons in frame %d\n", scatt_frame); #if B_FIELD_CALC == INTERNAL_E fprintf(fPtr, "Calculating the magnetic field using internal energy.\n", scatt_frame); #else //otherwise calculate B from the total energy fprintf(fPtr, "Calculating the magnetic field using the total energy and epsilon_B is set to %lf.\n", EPSILON_B); #endif phScattStats(phPtr, num_ph, &max_scatt, &min_scatt, &avg_scatt, &avg_r, fPtr); //for testing synch photons being emitted where 'i' photons are num_ph_emit=photonEmitSynch(&phPtr, &num_ph, &num_null_ph, &all_time_steps, &sorted_indexes, 
inj_radius, ph_weight_suggest, max_photons, array_num, fps_modified, theta_jmin_thread, theta_jmax_thread, scatt_frame, frame, xPtr, yPtr, szxPtr, szyPtr,rPtr,thetaPtr, tempPtr, densPtr, velxPtr, velyPtr, rng, 0, 0, fPtr); //printf("(phPtr)[0].p0 %e (phPtr)[71].p0 %e (phPtr)[72].comv_p0 %e (phPtr)[73].comv_p0 %e\n", (phPtr)[0].p0, (phPtr)[71].p0, (phPtr)[72].comv_p0, (phPtr)[73].comv_p0); } #endif fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: propagating and scattering %d photons\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,num_ph-num_null_ph); fflush(fPtr); frame_scatt_cnt=0; frame_abs_cnt=0; find_nearest_grid_switch=1; // set to true so the function findNearestPropertiesAndMinMFP by default finds the index of the grid block closest to each photon since we just read in a file and the prior index is invalid num_photons_find_new_element=0; n_comptonized=0; while (time_now<((scatt_frame+increment_scatt)/fps)) { //if simulation time is less than the simulation time of the next frame, keep scattering in this frame //for RIKEN hydro data, theres still 10 fps but after frame 3000, file increment is 10 not 1, therefore modify dt_max not fps //go through each photon and find blocks closest to each photon and properties of those blocks to calulate mean free path //and choose the photon with the smallest mfp and calculate the timestep num_photons_find_new_element+=findNearestPropertiesAndMinMFP(phPtr, num_ph, array_num, hydro_domain_x, hydro_domain_y, 1, xPtr, yPtr, zPtr, szxPtr, szyPtr, velxPtr, velyPtr, velzPtr, dens_labPtr, tempPtr,\ all_time_steps, sorted_indexes, rng, find_nearest_grid_switch, fPtr); find_nearest_grid_switch=0; //set to zero (false) since we do not absolutely need to refind the index, this makes the function findNearestPropertiesAndMinMFP just check if the photon is w/in the given grid box still //fprintf(fPtr, "In main: %d, %e, Newest Method results: %d, %e\n", ph_scatt_index, time_step, *(sorted_indexes+0), 
*(all_time_steps+(*(sorted_indexes+0))) ); //fflush(fPtr); //for (i=1;i<num_ph;i++) //{ // fprintf(fPtr, "Newest Method results: %d, %e\n", *(sorted_indexes+i), *(all_time_steps+(*(sorted_indexes+i))) ); //} if (*(all_time_steps+(*(sorted_indexes+0)))<dt_max) { //scatter the photon //fprintf(fPtr, "Passed Parameters: %e, %e, %e\n", (ph_vxPtr), (ph_vyPtr), (ph_tempPtr)); time_step=photonEvent( phPtr, num_ph, dt_max, all_time_steps, sorted_indexes, velxPtr, velyPtr, velzPtr, tempPtr, &ph_scatt_index, &frame_scatt_cnt, &frame_abs_cnt, rng, fPtr ); time_now+=time_step; //see if the scattered phton was a seed photon, if so replenish the seed photon #if SYNCHROTRON_SWITCH == ON if ((phPtr+ph_scatt_index)->type == SYNCHROTRON_POOL_PHOTON) { n_comptonized+=(phPtr+ph_scatt_index)->weight; (phPtr+ph_scatt_index)->type = COMPTONIZED_PHOTON; //c for compton scattered synchrotron photon //fprintf(fPtr, "num_null_ph %d\n", num_null_ph); //printf("The previous scattered photon was a seed photon %c.\n", (phPtr+ph_scatt_index)->type); num_ph_emit+=photonEmitSynch(&phPtr, &num_ph, &num_null_ph, &all_time_steps, &sorted_indexes, inj_radius, ph_weight_suggest, max_photons, array_num, fps_modified, theta_jmin_thread, theta_jmax_thread, scatt_frame, frame, xPtr, yPtr, szxPtr, szyPtr,rPtr,thetaPtr, tempPtr, densPtr, velxPtr, velyPtr, rng, 1, ph_scatt_index, fPtr); //fprintf(fPtr, " num_photon: %d\n",num_ph ); //fflush(fPtr); scatt_synch_num_ph++;//keep track of the number of synch photons that have scattered for later in checking of we need to rebin them //fprintf(fPtr,"photonEmitSynch: scatt_synch_num_ph Number: %d\n", scatt_synch_num_ph); //exit(0); } #endif //phScattStats(phPtr, num_ph, &max_scatt, &min_scatt, &avg_scatt, &avg_r); if ((frame_scatt_cnt%1000 == 0) && (frame_scatt_cnt != 0)) //modified this so it doesn't print when all photons get absorbed at first and frame_scatt_cnt=0 { fprintf(fPtr,"Scattering Number: %d\n", frame_scatt_cnt); fprintf(fPtr,"The local temp is: %e K\n", 
*(tempPtr + (phPtr+ph_scatt_index)->nearest_block_index) ); fprintf(fPtr,"Average photon energy is: %e ergs\n", averagePhotonEnergy(phPtr, num_ph)); //write function to average over the photons p0 can then do (1.6e-9) to get keV fprintf(fPtr,"The last time step was: %e.\nThe time now is: %e\n", time_step,time_now); //fprintf(fPtr,"Before Rebin: The average number of scatterings thus far is: %lf\nThe average position of photons is %e\n", avg_scatt, avg_r); fflush(fPtr); #if SYNCHROTRON_SWITCH == ON if (scatt_synch_num_ph>max_photons) { //if the number of synch photons that have been scattered is too high rebin them //printf("num_ph_emit: %d\n", num_ph_emit); rebin2dSynchCompPhotons(&phPtr, &num_ph, &num_null_ph, &num_ph_emit, &scatt_synch_num_ph, &all_time_steps, &sorted_indexes, max_photons, theta_jmin_thread, theta_jmax_thread, rng, fPtr); //fprintf(fPtr, "rebinSynchCompPhotons: scatt_synch_num_ph: %d\n", scatt_synch_num_ph); //exit(0); } #endif } //exit(0); } else { time_now+=dt_max; //for each photon update its position based on its momentum updatePhotonPosition(phPtr, num_ph, dt_max, fPtr); } //printf("In main 2: %e, %d, %e, %e\n", ((phPtr+ph_scatt_index)->num_scatt), ph_scatt_index, time_step, time_now); } #if SYNCHROTRON_SWITCH == ON if ((scatt_frame != scatt_framestart) || (restrt=='c')) //rememebr to change to != also at the other place in the code { if (scatt_synch_num_ph>max_photons) { //rebin the photons to ensure that we have a constant amount here fprintf(fPtr, "Num_ph: %d\n", num_ph); /* fprintf(fPtr,"Before Rebin: The average number of scatterings thus far is: %lf\nThe average position of photons is %e\n", avg_scatt, avg_r); fflush(fPtr); */ rebin2dSynchCompPhotons(&phPtr, &num_ph, &num_null_ph, &num_ph_emit, &scatt_synch_num_ph, &all_time_steps, &sorted_indexes, max_photons, theta_jmin_thread, theta_jmax_thread, rng, fPtr); //exit(0); } //make sure the photons that shou;d be absorbed should be absorbed if we have actually emitted any synchrotron 
photons if (num_ph_emit>0) { n_comptonized-=phAbsSynch(&phPtr, &num_ph, &frame_abs_cnt, &scatt_synch_num_ph, tempPtr, densPtr, fPtr); } } #endif //get scattering statistics phScattStats(phPtr, num_ph, &max_scatt, &min_scatt, &avg_scatt, &avg_r, fPtr); fprintf(fPtr,"The number of scatterings in this frame is: %d\n", frame_scatt_cnt); #if SYNCHROTRON_SWITCH == ON fprintf(fPtr,"The number of photons absorbed in this frame is: %d\n", frame_abs_cnt); #endif fprintf(fPtr,"The last time step was: %e.\nThe time now is: %e\n", time_step,time_now); fprintf(fPtr,"MCRaT had to refind the position of photons %d times in this frame.\n", num_photons_find_new_element); fprintf(fPtr,"The maximum number of scatterings for a photon is: %d\nThe minimum number of scatterings for a photon is: %d\n", max_scatt, min_scatt); fprintf(fPtr,"The average number of scatterings thus far is: %lf\nThe average position of photons is %e\n", avg_scatt, avg_r); fflush(fPtr); fprintf(fPtr, ">> Proc %d with angles %0.1lf-%0.1lf: Making checkpoint file\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI); fflush(fPtr); fprintf(fPtr, " mc_dir: %s\nframe %d\nfrm2: %d\nscatt_frame: %d\n num_photon: %d\ntime_now: %e\nlast_frame: %d\n", mc_dir, frame, frm2, scatt_frame, num_ph, time_now, last_frm ); fprintf(fPtr,"n_comptonized in this frame is: %e\n ", n_comptonized); fflush(fPtr); save_chkpt_success=saveCheckpoint(mc_dir, frame, frm2, scatt_frame, num_ph, time_now, phPtr, last_frm, angle_id, old_num_angle_procs); if (save_chkpt_success==0) { //if we saved the checkpoint successfully also save the photons to the hdf5 file, else there may be something wrong with the file system printPhotons(phPtr, num_ph, frame_abs_cnt, num_ph_emit, num_null_ph, scatt_synch_num_ph, scatt_frame , frame, last_frm, mc_dir, angle_id, fPtr); } else { fprintf(fPtr, "There is an issue with opening and saving the chkpt file therefore MCRaT is not saving data to the checkpoint or mc_proc files to prevent corruption of 
those data.\n"); printf("There is an issue with opening and saving the chkpt file therefore MCRaT is not saving data to the checkpoint or mc_proc files to prevent corruption of those data.\n"); fflush(fPtr); exit(1); } //if (frame==last_frm) //{ // exit(0); //} //if (strcmp(DIM_SWITCH, dim_3d_str)==0) #if SIM_SWITCH == RIKEN && DIMENSIONS ==3 { //if (RIKEN_SWITCH==1) { free(zPtr);free(phiPtr);free(velzPtr); zPtr=NULL; phiPtr=NULL; velzPtr=NULL; } } #endif free(xPtr);free(yPtr);free(szxPtr);free(szyPtr);free(rPtr);free(thetaPtr);free(velxPtr);free(velyPtr);free(densPtr);free(presPtr); free(gammaPtr);free(dens_labPtr);free(tempPtr); xPtr=NULL; yPtr=NULL; rPtr=NULL;thetaPtr=NULL;velxPtr=NULL;velyPtr=NULL;densPtr=NULL;presPtr=NULL;gammaPtr=NULL;dens_labPtr=NULL; szxPtr=NULL; szyPtr=NULL; tempPtr=NULL; } restrt='r';//set this to make sure that the next iteration of propogating photons doesnt use the values from the last reading of the checkpoint file scatt_synch_num_ph=0; //set this back equal to 0 for next batch of injected/emitted photons starting from nect injection frame num_null_ph=0; //set this back equal to 0 for next batch of injected/emitted photons starting from nect injection frame free(phPtr); phPtr=NULL; free(all_time_steps); all_time_steps=NULL; free(sorted_indexes); sorted_indexes=NULL; } save_chkpt_success=saveCheckpoint(mc_dir, frame, frm2, scatt_frame, 0, time_now, phPtr, last_frm, angle_id, old_num_angle_procs); //this is for processes using the old code that didnt restart efficiently fprintf(fPtr, "Process %d has completed the MC calculation.\n", angle_id); fflush(fPtr); //exit(0); MPI_Barrier(angle_comm); //merge files from each worker thread within a directory { increment_scatt=1; file_count=0; //count number of files for (i=frm0;i<=last_frm;i=i+increment_scatt) { //if ((RIKEN_SWITCH==1) && (strcmp(DIM_SWITCH, dim_3d_str)==0) && (i>=3000)) #if SIM_SWITCH == RIKEN && DIMENSIONS == 3 if (i>=3000) { increment_scatt=10; //when the frame ==3000 for 
RIKEN 3D hydro files, increment file numbers by 10 instead of by 1 } #endif file_count++; } //holds number of files for each process to merge MPI_Comm_size(angle_comm, &angle_procs); //to get the proper number of processes within the group MPI_Comm_rank(angle_comm, &angle_id); //reset the value of angle_id to what it should actualy be to properly distribute files to merge proc_frame_size=floor(file_count/ (float) angle_procs); frame_array=malloc(file_count*sizeof(int)); proc_frame_array=malloc(angle_procs*sizeof(int)); //sets index of each proceesed acquired value element_num=malloc(angle_procs*sizeof(int)); for (i=0;i<angle_procs;i++) { *(proc_frame_array+i)=i*proc_frame_size; *(element_num+i)=1; } //make vector with the files in order to pass them to each of the processes increment_scatt=1; file_count=0; for (i=frm0;i<=last_frm;i=i+increment_scatt) { //if ((RIKEN_SWITCH==1) && (strcmp(DIM_SWITCH, dim_3d_str)==0) && (i>=3000)) #if SIM_SWITCH == RIKEN && DIMENSIONS == 3 if (i>=3000) { increment_scatt=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1 } #endif *(frame_array+file_count)=i ; file_count++; //printf("file_count: %d frame: %d\n", file_count-1, *(frame_array+file_count-1)); } //pass first frame number that each rpocess should start to merge, can calulate the file it should merge until MPI_Scatterv(frame_array, element_num, proc_frame_array, MPI_INT, &frm0, 1, MPI_INT, 0, angle_comm); //fprintf(fPtr, "Value: last_frm: ,%d\n", file_count); //fflush(fPtr); //make sure all files get merged by giving the rest to the last process if (angle_id==angle_procs-1) { proc_frame_size=file_count-proc_frame_size*(angle_procs-1); //for last process take over the remaining number of files } //calculate what the last file the preocess should merge up to i=0; last_frm=frm0; while(i<proc_frame_size) { //if ((RIKEN_SWITCH==1) && (strcmp(DIM_SWITCH, dim_3d_str)==0) && (last_frm>=3000)) #if SIM_SWITCH == RIKEN && DIMENSIONS == 3 if 
(last_frm>=3000) { increment_scatt=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1 } #else { increment_scatt=1; } #endif last_frm+=increment_scatt; i++; } //if (angle_id==0) { //fprintf(fPtr, ">> Proc %d with angles %0.1lf-%0.1lf: Merging Files from %d to %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frm0, last_frm); fprintf(fPtr, ">> Proc %d with angles %0.1lf-%0.1lf: Merging Files from %d to %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frm0, last_frm); fflush(fPtr); dirFileMerge(mc_dir, frm0, last_frm, old_num_angle_procs, angle_id, fPtr); } } fprintf(fPtr, "Process %d has completed merging files.\n", angle_id); fflush(fPtr); fclose(fPtr); gsl_rng_free (rng); MPI_Finalize(); //free(rng); //free(thread_theta); return 0; }
DRACC_OMP_004_Counter_no_lock_Intra_yes.c
/* Concurrent access on a counter with no lock. Atomicity Violation. Data Race in line 15. Intra Region. */
#include <stdio.h>

#define N 100000

/* Shared counter; incremented concurrently without any synchronization. */
int countervar = 0;

/* Offloads N increments of the global countervar to device 0.
 * The "countervar++" below is a non-atomic read-modify-write executed by
 * the threads of the "distribute parallel for" region with no atomic or
 * critical protection -- this data race is the intentional defect this
 * benchmark exists to exhibit (do not "fix" it).
 * Returns 0 unconditionally. */
int count(){
    #pragma omp target map(tofrom:countervar) device(0)
    #pragma omp teams num_teams(1)
    #pragma omp distribute parallel for
    for (int i=0; i<N; i++){
        countervar++;   /* racy increment: lost updates possible */
    }
    return 0;
}

/* Runs the racy kernel, then prints the observed counter value next to
 * the expected (race-free) value of 100000. */
int main(){
    count();
    printf("counter: %i expected: 100000\n ",countervar);
    return 0;
}
DRB030-truedep1-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* This program has data races due to true dependence within a loop. Data race pair: a[i+1]@68:5 vs. a[i]@68:12 */
#include <stdlib.h>
#include <stdio.h>

/* DataRaceBench DRB030 kernel: initializes a[], then propagates a
 * loop-carried (true) dependence a[i+1] = a[i] + 1, then prints the array.
 * argv[1], if present, overrides the array length (default 100). */
int main(int argc, char* argv[])
{
  int i;
  int len=100;

  if (argc>1)
    len = atoi(argv[1]);   /* unvalidated: non-numeric input makes atoi return 0 */

  int a[len];   /* variable-length array sized from user input */

  /* NOTE(review): as written here, this pragma parallelizes the independent
   * initialization loop, while the documented race pair (a[i+1] vs a[i]) is
   * in the NEXT loop -- upstream DataRaceBench attaches "parallel for" to
   * the dependent loop instead. Verify placement against the upstream
   * DRB030-truedep1-var-yes.c source. */
#pragma omp parallel for
  for (i=0;i<len;i++)
    a[i]=i;

  /* Loop-carried true dependence: iteration i reads a[i] written by i-1. */
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]+1;

  for (i=0;i<len;i++)
    printf("%d\n",a[i]);

  return 0;
}
threshold.c
/* Copyright 2014. The Regents of the University of California. * Copyright 2015-2017. Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013-2017 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2015-2016 Jon Tamir <jtamir@eecs.berkeley.edu> * 2015 Frank Ong <frankong@berkeley.edu> */ #include <stdbool.h> #include <complex.h> #include "num/flpmath.h" #include "num/multind.h" #include "num/init.h" #include "iter/prox.h" #include "iter/thresh.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/debug.h" #include "misc/opts.h" #include "lowrank/lrthresh.h" #include "linops/waveop.h" #include "dfwavelet/prox_dfwavelet.h" // FIXME: lowrank interface should not be coupled to mri.h -- it should take D as an input #ifndef DIMS #define DIMS 16 #endif // FIXME: consider moving this to a more accessible location? static void wthresh(unsigned int D, const long dims[D], float lambda, unsigned int flags, complex float* out, const complex float* in) { long minsize[D]; md_singleton_dims(D, minsize); long course_scale[3] = MD_INIT_ARRAY(3, 16); md_copy_dims(3, minsize, course_scale); unsigned int wflags = 7; // FIXME for (unsigned int i = 0; i < 3; i++) if (dims[i] < minsize[i]) wflags = MD_CLEAR(wflags, i); long strs[D]; md_calc_strides(D, strs, dims, CFL_SIZE); const struct linop_s* w = linop_wavelet_create(D, wflags, dims, strs, minsize, false); const struct operator_p_s* p = prox_unithresh_create(D, w, lambda, flags); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void lrthresh(unsigned int D, const long dims[D], int llrblk, float lambda, unsigned int flags, complex float* out, const complex float* in) { long blkdims[MAX_LEV][D]; int levels = llr_blkdims(blkdims, ~flags, dims, llrblk); UNUSED(levels); const struct operator_p_s* p = lrthresh_create(dims, false, ~flags, (const long (*)[])blkdims, lambda, false, false); 
operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void dfthresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in) { long minsize[3]; md_singleton_dims(3, minsize); long coarse_scale[3] = MD_INIT_ARRAY(3, 16); md_min_dims(3, ~0u, minsize, dims, coarse_scale); complex float res[3]; res[0] = 1.; res[1] = 1.; res[2] = 1.; assert(3 == dims[TE_DIM]); const struct operator_p_s* p = prox_dfwavelet_create(dims, minsize, res, TE_DIM, lambda, false); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void hard_thresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in) { long size = md_calc_size(DIMS, dims) * 2; const float* inf = (const float*)in; float* outf = (float*)out; #pragma omp parallel for for (long i = 0; i < size; i++) outf[i] = inf[i] > lambda ? inf[i] : 0.; } static const char usage_str[] = "lambda <input> <output>"; static const char help_str[] = "Perform (soft) thresholding with parameter lambda."; int main_threshold(int argc, char* argv[]) { unsigned int flags = 0; enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD } th_type = NONE; int llrblk = 8; const struct opt_s opts[] = { OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"), OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"), OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"), OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"), OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"), OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"), }; cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); const int N = DIMS; long dims[N]; complex float* idata = load_cfl(argv[2], N, dims); complex float* odata = create_cfl(argv[3], N, dims); float lambda = atof(argv[1]); switch (th_type) { case WAV: wthresh(N, 
dims, lambda, flags, odata, idata); break; case LLR: lrthresh(N, dims, llrblk, lambda, flags, odata, idata); break; case DFW: dfthresh(N, dims, lambda, odata, idata); break; case HARD: hard_thresh(N, dims, lambda, odata, idata); break; default: md_zsoftthresh(N, dims, lambda, flags, odata, idata); } unmap_cfl(N, dims, idata); unmap_cfl(N, dims, odata); return 0; }
compiler_cgen.c
/* Generated by Nim Compiler v0.15.0 */ /* (c) 2016 Andreas Rumpf */ /* The generated code is subject to the original license. */ #define NIM_INTBITS 32 #include "nimbase.h" #include <string.h> typedef struct Tcgen531027 Tcgen531027; typedef struct TNimType TNimType; typedef struct TNimNode TNimNode; typedef struct Ropeobj180006 Ropeobj180006; typedef struct NimStringDesc NimStringDesc; typedef struct TGenericSeq TGenericSeq; typedef struct Cell47304 Cell47304; typedef struct Cellseq47320 Cellseq47320; typedef struct Gcheap49818 Gcheap49818; typedef struct Gcstack49816 Gcstack49816; typedef struct Memregion29485 Memregion29485; typedef struct Smallchunk29439 Smallchunk29439; typedef struct Llchunk29479 Llchunk29479; typedef struct Bigchunk29441 Bigchunk29441; typedef struct Intset29414 Intset29414; typedef struct Trunk29410 Trunk29410; typedef struct Avlnode29483 Avlnode29483; typedef struct Gcstat49814 Gcstat49814; typedef struct Cellset47316 Cellset47316; typedef struct Pagedesc47312 Pagedesc47312; typedef struct Ttypeseq294836 Ttypeseq294836; typedef struct Ttype294840 Ttype294840; typedef struct Intset270030 Intset270030; typedef struct Trunk270026 Trunk270026; typedef struct Trunkseq270028 Trunkseq270028; typedef struct Tpasscontext343002 Tpasscontext343002; typedef struct Tsym294834 Tsym294834; typedef struct Tidobj201004 Tidobj201004; typedef struct TNimObject TNimObject; typedef struct TY294929 TY294929; typedef struct Tstrtable294806 Tstrtable294806; typedef struct Tsymseq294804 Tsymseq294804; typedef struct Tident201010 Tident201010; typedef struct Tlineinfo193336 Tlineinfo193336; typedef struct Tnode294802 Tnode294802; typedef struct Tloc294816 Tloc294816; typedef struct Tlib294820 Tlib294820; typedef struct TY531153 TY531153; typedef struct TY205018 TY205018; typedef struct Tidtable294850 Tidtable294850; typedef struct Tidpairseq294848 Tidpairseq294848; typedef struct Tlinkedlist148013 Tlinkedlist148013; typedef struct Tlistentry148007 Tlistentry148007; 
typedef struct Tcproc531021 Tcproc531021; typedef struct Tnodetable294862 Tnodetable294862; typedef struct Tnodepairseq294860 Tnodepairseq294860; typedef struct Debuginfo205009 Debuginfo205009; typedef struct TY205021 TY205021; typedef struct TY205023 TY205023; typedef struct Tnodeseq294796 Tnodeseq294796; typedef struct TY193350 TY193350; typedef struct TY531095 TY531095; typedef struct Trodreader334021 Trodreader334021; typedef struct TY294960 TY294960; typedef struct TY205017 TY205017; typedef struct Enumdesc205007 Enumdesc205007; typedef struct Tinfocc275008 Tinfocc275008; typedef struct Tblock531019 Tblock531019; typedef struct Ttraversalclosure539019 Ttraversalclosure539019; typedef struct TY136002 TY136002; typedef struct Tbitset341004 Tbitset341004; typedef struct TY193612 TY193612; typedef struct Tfileinfo193334 Tfileinfo193334; typedef struct Tinfoos178035 Tinfoos178035; typedef struct Tinfocpu178476 Tinfocpu178476; typedef struct Tstrentry148009 Tstrentry148009; typedef struct TY129506 TY129506; typedef struct Basechunk29437 Basechunk29437; typedef struct Freecell29429 Freecell29429; typedef struct Tinstantiation294824 Tinstantiation294824; typedef struct Tidpair294846 Tidpair294846; typedef struct Tnodepair294858 Tnodepair294858; typedef struct Filenamemapping205005 Filenamemapping205005; typedef struct TY334033 TY334033; typedef struct Tindex334019 Tindex334019; typedef struct Tiitable301142 Tiitable301142; typedef struct Tiipairseq301140 Tiipairseq301140; typedef struct Table334054 Table334054; typedef struct Keyvaluepairseq334057 Keyvaluepairseq334057; typedef struct Memfile332202 Memfile332202; typedef struct TY294961 TY294961; typedef struct Tiipair301138 Tiipair301138; typedef struct Keyvaluepair334060 Keyvaluepair334060; typedef NU8 Tnimkind3403; typedef NU8 Tnimtypeflag3409Set; typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0); typedef N_NIMCALL_PTR(void*, TY3494) (void* p0); struct TNimType { NI size; Tnimkind3403 kind; Tnimtypeflag3409Set 
flags; TNimType* base; TNimNode* node; void* finalizer; TY3489 marker; TY3494 deepcopy; }; typedef NU8 Tnimnodekind3405; struct TNimNode { Tnimnodekind3405 kind; NI offset; TNimType* typ; NCSTRING name; NI len; TNimNode** sons; }; typedef N_NIMCALL_PTR(void, Globalmarkerproc55802) (void); struct TGenericSeq { NI len; NI reserved; }; struct NimStringDesc { TGenericSeq Sup; NIM_CHAR data[SEQ_DECL_SIZE]; }; struct Cell47304 { NI refcount; TNimType* typ; }; struct Cellseq47320 { NI len; NI cap; Cell47304** d; }; typedef Smallchunk29439* TY29500[512]; typedef Trunk29410* Trunkbuckets29412[256]; struct Intset29414 { Trunkbuckets29412 data; }; struct Memregion29485 { NI minlargeobj; NI maxlargeobj; TY29500 freesmallchunks; Llchunk29479* llmem; NI currmem; NI maxmem; NI freemem; NI lastsize; Bigchunk29441* freechunkslist; Intset29414 chunkstarts; Avlnode29483* root; Avlnode29483* deleted; Avlnode29483* last; Avlnode29483* freeavlnodes; NIM_BOOL locked; }; struct Gcstat49814 { NI stackscans; NI cyclecollections; NI maxthreshold; NI maxstacksize; NI maxstackcells; NI cycletablesize; NI64 maxpause; }; struct Cellset47316 { NI counter; NI max; Pagedesc47312* head; Pagedesc47312** data; }; struct Gcheap49818 { Gcstack49816* stack; void* stackbottom; NI cyclethreshold; Cellseq47320 zct; Cellseq47320 decstack; Cellseq47320 tempstack; NI recgclock; Memregion29485 region; Gcstat49814 stat; Cellset47316 marked; Cellseq47320 additionalroots; }; struct Intset270030 { NI counter; NI max; Trunk270026* head; Trunkseq270028* data; }; struct TNimObject { TNimType* m_type; }; struct Tidobj201004 { TNimObject Sup; NI id; }; typedef NU8 Tsymkind294435; struct Tstrtable294806 { NI counter; Tsymseq294804* data; }; typedef NU16 Tmagic294524; struct Tlineinfo193336 { NI16 line; NI16 col; NI32 fileindex; }; typedef NU32 Tsymflag294184Set; typedef NU32 Toption171009Set; typedef NU8 Tlockind294808; typedef NU8 Tstorageloc294812; typedef NU16 Tlocflag294810Set; struct Tloc294816 { Tlockind294808 k; 
Tstorageloc294812 s; Tlocflag294810Set flags; Ttype294840* t; Ropeobj180006* r; }; struct Tsym294834 { Tidobj201004 Sup; Tsymkind294435 kind; union{ struct {Ttypeseq294836* typeinstcache; } S1; struct {TY294929* procinstcache; Tsym294834* gcunsafetyreason; } S2; struct {TY294929* usedgenerics; Tstrtable294806 tab; } S3; struct {Tsym294834* guard; NI bitsize; } S4; } kindU; Tmagic294524 magic; Ttype294840* typ; Tident201010* name; Tlineinfo193336 info; Tsym294834* owner; Tsymflag294184Set flags; Tnode294802* ast; Toption171009Set options; NI position; NI offset; Tloc294816 loc; Tlib294820* annex; Tnode294802* constraint; }; struct TY205018 { NimStringDesc* Field0; NI Field1; }; struct Tpasscontext343002 { TNimObject Sup; NIM_BOOL fromcache; }; typedef Ropeobj180006* Tcfilesections531009[18]; typedef NU8 Codegenflag531025Set; struct Tidtable294850 { NI counter; Tidpairseq294848* data; }; struct Tlinkedlist148013 { Tlistentry148007* head; Tlistentry148007* tail; NI counter; }; struct Tnodetable294862 { NI counter; Tnodepairseq294860* data; }; typedef Ropeobj180006* TY531136[10]; struct Tcgen531027 { Tpasscontext343002 Sup; Tcfilesections531009 s; Codegenflag531025Set flags; Tsym294834* module; NimStringDesc* filename; NimStringDesc* cfilename; Ropeobj180006* tmpbase; Tidtable294850 typecache; Tidtable294850 forwtypecache; Intset270030 declaredthings; Intset270030 declaredprotos; Tlinkedlist148013 headerfiles; Intset270030 typeinfomarker; Tcproc531021* initproc; Tcproc531021* postinitproc; Tcproc531021* preinitproc; Ttypeseq294836* typestack; Tnodetable294862 datacache; Tsymseq294804* forwardedprocs; NI typenodes; NI nimtypes; Ropeobj180006* typenodesname; Ropeobj180006* nimtypesname; NI labels; TY531136 extensionloaders; Ropeobj180006* injectstmt; }; struct Debuginfo205009 { NI version; TY205021* files; TY205023* enums; NIM_BOOL conflicts; }; struct Tident201010 { Tidobj201004 Sup; NimStringDesc* s; Tident201010* next; NI h; }; struct Tcproc531021 { Tsym294834* prc; 
NIM_BOOL beforeretneeded; NIM_BOOL threadvaraccessed; Tlineinfo193336 lastlineinfo; Tnodeseq294796* nestedtrystmts; NI inexceptblock; TY193350* finallysafepoints; NI labels; TY531095* blocks; NI breakidx; Toption171009Set options; NI maxframelen; Tcgen531027* module; NI withinloop; NI splitdecls; NI gcframeid; Ropeobj180006* gcframetype; }; typedef NU8 Tsymflag294184; typedef NU8 Codegenflag531025; typedef NU8 Toption171009; typedef NU64 Tglobaloption171013Set; typedef NU8 Tglobaloption171013; typedef NU8 Tcommands171076; typedef NU16 Tnodeflag294427Set; typedef NU8 Tnodekind294020; struct Tnode294802 { Ttype294840* typ; Tlineinfo193336 info; Tnodeflag294427Set flags; Tnodekind294020 kind; union{ struct {NI64 intval; } S1; struct {NF floatval; } S2; struct {NimStringDesc* strval; } S3; struct {Tsym294834* sym; } S4; struct {Tident201010* ident; } S5; struct {Tnodeseq294796* sons; } S6; } kindU; NimStringDesc* comment; }; typedef Ropeobj180006* TY535289[1]; typedef NU8 Tlocflag294810; struct Tlistentry148007 { TNimObject Sup; Tlistentry148007* prev; Tlistentry148007* next; }; typedef NU8 Tlibkind294818; struct Tlib294820 { Tlistentry148007 Sup; Tlibkind294818 kind; NIM_BOOL generated; NIM_BOOL isoverriden; Ropeobj180006* name; Tnode294802* path; }; typedef NU8 Tcfilesection531005; typedef NU8 Ttypekind294244; typedef NU8 Tcallingconvention294002; typedef NU32 Ttypeflag294431Set; struct Ttype294840 { Tidobj201004 Sup; Ttypekind294244 kind; Tcallingconvention294002 callconv; Ttypeflag294431Set flags; Ttypeseq294836* sons; Tnode294802* n; Tsym294834* owner; Tsym294834* sym; Tsym294834* destructor; Tsym294834* deepcopy; Tsym294834* assignment; TY294960* methods; NI64 size; NI16 align; NI16 locklevel; Tloc294816 loc; }; typedef Ropeobj180006* TY534811[2]; typedef NU8 Tctypekind531007; typedef NU64 Ttypekind294244Set; typedef NU8 Ttypeflag294431; typedef NimStringDesc* TY535943[14]; typedef NU8 Tprefereddesc322011; typedef Ropeobj180006* TY180507[1]; struct Enumdesc205007 
{ NI size; NU32 owner; NI id; NimStringDesc* name; TY205017* values; }; typedef Ropeobj180006* TY537235[4]; typedef NimStringDesc* TY294016[10]; typedef Ropeobj180006* TY537238[3]; struct Ropeobj180006 { TNimObject Sup; Ropeobj180006* left; Ropeobj180006* right; NI length; NimStringDesc* data; }; typedef NU8 Tinfoccprop275004Set; struct Tinfocc275008 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; NimStringDesc* Field3; NimStringDesc* Field4; NimStringDesc* Field5; NimStringDesc* Field6; NimStringDesc* Field7; NimStringDesc* Field8; NimStringDesc* Field9; NimStringDesc* Field10; NimStringDesc* Field11; NimStringDesc* Field12; NimStringDesc* Field13; NimStringDesc* Field14; NimStringDesc* Field15; NimStringDesc* Field16; NimStringDesc* Field17; NimStringDesc* Field18; NimStringDesc* Field19; Tinfoccprop275004Set Field20; }; typedef Tinfocc275008 TY275427[13]; typedef NU8 Tsystemcc275002; typedef NU8 Tnodeflag294427; typedef NU8 Tcprocsection531011; typedef Ropeobj180006* Tcprocsections531013[3]; struct Tblock531019 { NI id; Ropeobj180006* label; Tcprocsections531013 sections; NIM_BOOL isloop; NI16 nestedtrystmts; NI16 nestedexceptstmts; NI16 framelen; }; typedef NU8 Tgcmode171080; typedef NU8 Ttypeinforeason539016; struct Ttraversalclosure539019 { Tcproc531021* p; NimStringDesc* visitorfrmt; }; typedef NU8 Ttypefieldresult322145; typedef NU8 Tinfoccprop275004; typedef Ropeobj180006* TY538847[6]; typedef Ropeobj180006* TY538401[7]; typedef Ropeobj180006* TY538475[5]; typedef NU16 Tmsgkind193002; typedef NU8 Tassignmentflag540302Set; typedef NU8 Tassignmentflag540302; typedef NimStringDesc* TY554655[19]; typedef NimStringDesc* TY553642[3]; typedef NimStringDesc* TY558764[4]; typedef NimStringDesc* TY553828[42]; typedef NimStringDesc* TY553281[7]; typedef NU8 Trenderflag313004Set; typedef NimStringDesc* TY559052[2]; typedef NU8 Tclosuretypekind537679; typedef NimStringDesc* TY558428[6]; typedef NU8 Tanalysisresult475003; typedef NU8 
char136Set[32]; typedef NU8 Tdistinctcompare326427; typedef NU8 Ttypecmpflag326429Set; typedef NU16 Tspecialword277003; typedef NU8 Tsystemos178004; struct Tfileinfo193334 { NimStringDesc* fullpath; NimStringDesc* projpath; NimStringDesc* shortname; Ropeobj180006* quotedname; Ropeobj180006* quotedfullname; TY193350* lines; NimStringDesc* dirtyfile; }; typedef NU8 Tinfoosprop178031Set; struct Tinfoos178035 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; NimStringDesc* Field3; NimStringDesc* Field4; NimStringDesc* Field5; NimStringDesc* Field6; NimStringDesc* Field7; NimStringDesc* Field8; NimStringDesc* Field9; NimStringDesc* Field10; NimStringDesc* Field11; Tinfoosprop178031Set Field12; }; typedef Tinfoos178035 TY178082[24]; typedef NU8 Tendian178474; struct Tinfocpu178476 { NimStringDesc* Field0; NI Field1; Tendian178474 Field2; NI Field3; NI Field4; }; typedef Tinfocpu178476 TY178510[19]; typedef NU8 Tsystemcpu178452; struct Tstrentry148009 { Tlistentry148007 Sup; NimStringDesc* data; }; struct TY129506 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; }; struct Gcstack49816 { Gcstack49816* prev; Gcstack49816* next; void* starts; void* pos; NI maxstacksize; }; struct Basechunk29437 { NI prevsize; NI size; NIM_BOOL used; }; struct Smallchunk29439 { Basechunk29437 Sup; Smallchunk29439* next; Smallchunk29439* prev; Freecell29429* freelist; NI free; NI acc; NF data; }; struct Llchunk29479 { NI size; NI acc; Llchunk29479* next; }; struct Bigchunk29441 { Basechunk29437 Sup; Bigchunk29441* next; Bigchunk29441* prev; NI align; NF data; }; typedef NI TY29418[16]; struct Trunk29410 { Trunk29410* next; NI key; TY29418 bits; }; typedef Avlnode29483* TY29490[2]; struct Avlnode29483 { TY29490 link; NI key; NI upperbound; NI level; }; struct Pagedesc47312 { Pagedesc47312* next; NI key; TY29418 bits; }; struct Trunk270026 { Trunk270026* next; NI key; TY29418 bits; }; struct Tidpair294846 { Tidobj201004* key; TNimObject* val; }; struct 
Tnodepair294858 { NI h; Tnode294802* key; NI val; }; struct Filenamemapping205005 { NimStringDesc* package; NimStringDesc* file; NU32 mangled; }; typedef NU8 Treasonforrecompile334002; struct Tiitable301142 { NI counter; Tiipairseq301140* data; }; struct Tindex334019 { NI lastidxkey; NI lastidxval; Tiitable301142 tab; NimStringDesc* r; NI offset; }; struct Table334054 { Keyvaluepairseq334057* data; NI counter; }; struct Memfile332202 { void* mem; NI size; int handle; }; struct Trodreader334021 { TNimObject Sup; NI pos; NCSTRING s; Toption171009Set options; Treasonforrecompile334002 reason; TY334033* moddeps; TY334033* files; NI dataidx; NI convertersidx; NI initidx; NI interfidx; NI compilerprocsidx; NI methodsidx; NimStringDesc* filename; Tindex334019 index; Tindex334019 imports; NI readerindex; NI line; NI moduleid; Table334054 syms; Memfile332202 memfile; Tsymseq294804* methods; NimStringDesc* origfile; NIM_BOOL inviewmode; }; struct TY294961 { NI Field0; Tsym294834* Field1; }; struct Freecell29429 { Freecell29429* next; NI zerofield; }; struct Tinstantiation294824 { Tsym294834* sym; Ttypeseq294836* concretetypes; NI compilesid; }; struct Tiipair301138 { NI key; NI val; }; struct Keyvaluepair334060 { NI Field0; NI Field1; Tsym294834* Field2; }; struct Ttypeseq294836 { TGenericSeq Sup; Ttype294840* data[SEQ_DECL_SIZE]; }; struct TY531153 { TGenericSeq Sup; Tcgen531027* data[SEQ_DECL_SIZE]; }; struct Tsymseq294804 { TGenericSeq Sup; Tsym294834* data[SEQ_DECL_SIZE]; }; struct TY205017 { TGenericSeq Sup; TY205018 data[SEQ_DECL_SIZE]; }; struct TY136002 { TGenericSeq Sup; NimStringDesc* data[SEQ_DECL_SIZE]; }; struct Tbitset341004 { TGenericSeq Sup; NI8 data[SEQ_DECL_SIZE]; }; struct TY531095 { TGenericSeq Sup; Tblock531019 data[SEQ_DECL_SIZE]; }; struct TY193350 { TGenericSeq Sup; Ropeobj180006* data[SEQ_DECL_SIZE]; }; struct Tnodeseq294796 { TGenericSeq Sup; Tnode294802* data[SEQ_DECL_SIZE]; }; struct TY193612 { TGenericSeq Sup; Tfileinfo193334 data[SEQ_DECL_SIZE]; 
}; struct Trunkseq270028 { TGenericSeq Sup; Trunk270026* data[SEQ_DECL_SIZE]; }; struct TY294929 { TGenericSeq Sup; Tinstantiation294824* data[SEQ_DECL_SIZE]; }; struct Tidpairseq294848 { TGenericSeq Sup; Tidpair294846 data[SEQ_DECL_SIZE]; }; struct Tnodepairseq294860 { TGenericSeq Sup; Tnodepair294858 data[SEQ_DECL_SIZE]; }; struct TY205021 { TGenericSeq Sup; Filenamemapping205005 data[SEQ_DECL_SIZE]; }; struct TY205023 { TGenericSeq Sup; Enumdesc205007 data[SEQ_DECL_SIZE]; }; struct TY294960 { TGenericSeq Sup; TY294961 data[SEQ_DECL_SIZE]; }; struct TY334033 { TGenericSeq Sup; NI32 data[SEQ_DECL_SIZE]; }; struct Tiipairseq301140 { TGenericSeq Sup; Tiipair301138 data[SEQ_DECL_SIZE]; }; struct Keyvaluepairseq334057 { TGenericSeq Sup; Keyvaluepair334060 data[SEQ_DECL_SIZE]; }; N_NIMCALL(void, nimGCvisit)(void* d0, NI op0); N_NIMCALL(void, T839829468_2)(void); N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc55802 markerproc0); N_NIMCALL(void, T839829468_3)(void); N_NIMCALL(Ropeobj180006*, rope_180277_2381377266)(NimStringDesc* s0); static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0); static N_INLINE(Cell47304*, usrtocell_51440_1689653243)(void* usr0); static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47304* c0); N_NOINLINE(void, addzct_51417_1689653243)(Cellseq47320* s0, Cell47304* c0); N_NIMCALL(void, T839829468_5)(void); N_NIMCALL(void, T839829468_6)(void); static N_INLINE(void, nimGCunrefNoCycle)(void* p0); N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0); N_NIMCALL(void, T839829468_7)(void); N_NIMCALL(void, initintset_270885_2627731572)(Intset270030* Result); N_NOINLINE(void, chckNil)(void* p0); N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0); N_NIMCALL(void, T839829468_8)(void); N_NIMCALL(Tcgen531027*, newmodule_565045_839829468)(Tsym294834* module0); N_NIMCALL(Tcgen531027*, getcgenmodule_534226_839829468)(Tsym294834* s0); N_NIMCALL(void, internalerror_198113_155036129)(NimStringDesc* errmsg0); N_NIMCALL(NimStringDesc*, 
HEX24_198185_1689653243)(TY205018 x0); N_NIMCALL(Tcgen531027*, rawnewmodule_565038_839829468)(Tsym294834* module0); N_NIMCALL(Tcgen531027*, rawnewmodule_564663_839829468)(Tsym294834* module0, NimStringDesc* filename0); N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0); static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0); static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0); N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0); N_NIMCALL(NU32, hashowner_534977_839829468)(Tsym294834* s0); N_NIMCALL(NU32, register_205121_1926258066)(Debuginfo205009* self0, NimStringDesc* package0, NimStringDesc* file0); N_NIMCALL(NimStringDesc*, rawNewString)(NI space0); N_NIMCALL(void, initlinkedlist_148031_3771138726)(Tlinkedlist148013* list0); N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0); N_NIMCALL(void, initidtable_298019_850551059)(Tidtable294850* x0); N_NIMCALL(Tcproc531021*, newproc_531206_3723162438)(Tsym294834* prc0, Tcgen531027* module0); static N_INLINE(void, asgnRef)(void** dest0, void* src0); static N_INLINE(void, incref_53419_1689653243)(Cell47304* c0); static N_INLINE(void, decref_53001_1689653243)(Cell47304* c0); N_NIMCALL(Toption171009Set, initprocoptions_564635_839829468)(Tcgen531027* m0); N_NIMCALL(Tcproc531021*, newpreinitproc_564625_839829468)(Tcgen531027* m0); N_NIMCALL(Tcproc531021*, newpostinitproc_564630_839829468)(Tcgen531027* m0); N_NIMCALL(void, initnodetable_298085_850551059)(Tnodetable294862* x0); N_NIMCALL(Ropeobj180006*, gettempname_535596_839829468)(Tcgen531027* m0); N_NIMCALL(Ropeobj180006*, HEX26_180418_2381377266)(Ropeobj180006* a0, Ropeobj180006* b0); N_NIMCALL(Ropeobj180006*, rope_180401_2381377266)(NI64 i0); N_NIMCALL(NimStringDesc*, tofullpath_194264_155036129)(NI32 fileidx0); N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0); N_NIMCALL(NimStringDesc*, tofilename_194260_155036129)(NI32 fileidx0); N_NIMCALL(NimStringDesc*, 
noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0); N_NIMCALL(NimStringDesc*, completecfilepath_275854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0); N_NIMCALL(void, readmergeinfo_532613_2760143328)(NimStringDesc* cfilename0, Tcgen531027* m0); N_NIMCALL(NimStringDesc*, getcfile_565204_839829468)(Tcgen531027* m0); N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0); N_NIMCALL(NimStringDesc*, withpackagename_172073_2607990831)(NimStringDesc* path0); static N_INLINE(NIM_BOOL, skipcodegen_343085_2355241294)(Tnode294802* n0); N_NIMCALL(void, genstmts_541244_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(void, expr_541248_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, fillprocloc_541201_839829468)(Tsym294834* sym0); N_NIMCALL(void, fillloc_534282_839829468)(Tloc294816* a0, Tlockind294808 k0, Ttype294840* typ0, Ropeobj180006* r0, Tstorageloc294812 s0); N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0); N_NIMCALL(Ropeobj180006*, manglename_535205_839829468)(Tsym294834* s0); N_NIMCALL(NIM_BOOL, iskeyword_534960_839829468)(Tident201010* w0); N_NIMCALL(NimStringDesc*, mangle_530847_2036603609)(NimStringDesc* name0); N_NIMCALL(void, add_180487_2381377266)(Ropeobj180006** a0, NimStringDesc* b0); N_NIMCALL(void, add_180482_2381377266)(Ropeobj180006** a0, Ropeobj180006* b0); N_NIMCALL(Ropeobj180006*, HEX25_180905_2381377266)(NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0); N_NIMCALL(void, genprocprototype_541254_839829468)(Tcgen531027* m0, Tsym294834* sym0); N_NIMCALL(void, useheader_534369_839829468)(Tcgen531027* m0, Tsym294834* sym0); N_NIMCALL(NIM_BOOL, includestr_148249_3771138726)(Tlinkedlist148013* list0, NimStringDesc* data0); N_NIMCALL(NimStringDesc*, getstr_299230_850551059)(Tnode294802* a0); N_NIMCALL(Tsym294834*, getmodule_301123_2984716966)(Tsym294834* s0); N_NIMCALL(NIM_BOOL, containsorincl_270862_2627731572)(Intset270030* s0, NI key0); N_NIMCALL(Ropeobj180006*, 
ropecg_534407_839829468)(Tcgen531027* m0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0); N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0); static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0); N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79210_1689653243, NI last0); N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0); N_NIMCALL(Ropeobj180006*, cgsym_534403_839829468)(Tcgen531027* m0, NimStringDesc* name0); N_NIMCALL(Tsym294834*, getcompilerproc_340746_3937434831)(NimStringDesc* name0); N_NIMCALL(void, genproc_534951_839829468)(Tcgen531027* m0, Tsym294834* prc0); N_NIMCALL(NIM_BOOL, isactivated_563431_839829468)(Tsym294834* prc0); N_NIMCALL(void, addforwardedproc_534203_839829468)(Tcgen531027* m0, Tsym294834* prc0); N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0); N_NIMCALL(void, genprocnoforward_562906_839829468)(Tcgen531027* m0, Tsym294834* prc0); N_NIMCALL(void, genprocaux_562284_839829468)(Tcgen531027* m0, Tsym294834* prc0); N_NIMCALL(Ropeobj180006*, genprocheader_537867_839829468)(Tcgen531027* m0, Tsym294834* prc0); N_NIMCALL(void, genclinedir_534813_839829468)(Ropeobj180006** r0, Tlineinfo193336 info0); N_NIMCALL(void, genclinedir_534725_839829468)(Ropeobj180006** r0, NimStringDesc* filename0, NI line0); N_NIMCALL(void, addf_181205_2381377266)(Ropeobj180006** c0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0); N_NIMCALL(NimStringDesc*, makesinglelinecstring_530835_2036603609)(NimStringDesc* s0); N_NIMCALL(NI, safelinenm_534721_839829468)(Tlineinfo193336 info0); static N_INLINE(NI, tolinenumber_194415_155036129)(Tlineinfo193336 info0); N_NIMCALL(void, genprocparams_536115_839829468)(Tcgen531027* m0, Ttype294840* t0, Ropeobj180006** rettype0, Ropeobj180006** params0, Intset270030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0); N_NIMCALL(NIM_BOOL, isinvalidreturntype_535548_839829468)(Ttype294840* rettype0); N_NIMCALL(Tctypekind531007, 
maptype_535393_839829468)(Ttype294840* typ0); N_NIMCALL(Tctypekind531007, mapsettype_535389_839829468)(Ttype294840* typ0); N_NIMCALL(NI64, getsize_322135_3876443242)(Ttype294840* typ0); N_NIMCALL(Ttype294840*, lastson_297377_850551059)(Ttype294840* n0); N_NIMCALL(NI64, firstord_322001_3876443242)(Ttype294840* t0); N_NIMCALL(Ttype294840*, skiptypes_298099_850551059)(Ttype294840* t0, Ttypekind294244Set kinds0); N_NIMCALL(NIM_BOOL, isimportedcpptype_535476_839829468)(Ttype294840* t0); N_NIMCALL(NIM_BOOL, needscomplexassignment_535509_839829468)(Ttype294840* typ0); N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_322117_3876443242)(Ttype294840* typ0); static N_INLINE(NIM_BOOL, isobjlackingtypefield_535513_839829468)(Ttype294840* typ0); N_NIMCALL(NIM_BOOL, ispureobject_322138_3876443242)(Ttype294840* typ0); N_NIMCALL(Ropeobj180006*, gettypedescaux_535503_839829468)(Tcgen531027* m0, Ttype294840* typ0, Intset270030* check0); N_NIMCALL(Ttype294840*, getuniquetype_530640_2036603609)(Ttype294840* key0); N_NIMCALL(Ropeobj180006*, gettypepre_535972_839829468)(Tcgen531027* m0, Ttype294840* typ0); N_NIMCALL(Ropeobj180006*, getsimpletypedesc_535936_839829468)(Tcgen531027* m0, Ttype294840* typ0); N_NIMCALL(Ropeobj180006*, typenameorliteral_535898_839829468)(Ttype294840* t0, NimStringDesc* literal0); N_NIMCALL(Ropeobj180006*, gettypename_535313_839829468)(Ttype294840* typ0); N_NIMCALL(Ropeobj180006*, typename_535292_839829468)(Ttype294840* typ0); N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0); N_NIMCALL(Ropeobj180006*, cachegettype_535591_839829468)(Tidtable294850 tab0, Ttype294840* key0); N_NIMCALL(TNimObject*, idtableget_301086_2984716966)(Tidtable294850 t0, Tidobj201004* key0); N_NIMCALL(NimStringDesc*, typetostring_322017_3876443242)(Ttype294840* typ0, Tprefereddesc322011 prefer0); N_NIMCALL(Ttype294840*, elemtype_322394_3876443242)(Ttype294840* t0); N_NIMCALL(Ropeobj180006*, HEX26_180447_2381377266)(Ropeobj180006* a0, NimStringDesc* b0); N_NIMCALL(Ropeobj180006*, 
gettypeforward_536039_839829468)(Tcgen531027* m0, Ttype294840* typ0); N_NIMCALL(NIM_BOOL, isimportedtype_535449_839829468)(Ttype294840* t0); N_NIMCALL(NimStringDesc*, getforwardstructformat_536015_839829468)(Tcgen531027* m0); N_NIMCALL(Ropeobj180006*, structorunion_536001_839829468)(Ttype294840* t0); N_NIMCALL(void, idtableput_301094_2984716966)(Tidtable294850* t0, Tidobj201004* key0, TNimObject* val0); N_NIMCALL(void, pushtype_535958_839829468)(Tcgen531027* m0, Ttype294840* typ0); N_NIMCALL(Ropeobj180006*, gettypedescweak_536079_839829468)(Tcgen531027* m0, Ttype294840* t0, Intset270030* check0); N_NIMCALL(void, internalerror_198100_155036129)(Tlineinfo193336 info0, NimStringDesc* errmsg0); N_NIMCALL(NIM_BOOL, hasenum_205230_1926258066)(Debuginfo205009 self0, NimStringDesc* ename0, NI id0, NU32 owner0); N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0); static N_INLINE(NI, len_295081_850551059)(Tnode294802* n0); N_NIMCALL(void, registerenum_205419_1926258066)(Debuginfo205009* self0, Enumdesc205007* ed0); N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86404_1689653243, TNimType* mt0); N_NIMCALL(void, appcg_534632_839829468)(Tcgen531027* m0, Ropeobj180006** c0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0); N_NIMCALL(NI64, lengthord_322007_3876443242)(Ttype294840* t0); N_NIMCALL(NIM_BOOL, scancppgenericslot_536827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0); N_NIMCALL(Ttype294840*, resolvestarsincpptype_536891_839829468)(Ttype294840* typ0, NI idx0, NI stars0); N_NIMCALL(NI, len_297339_850551059)(Ttype294840* n0); N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0); N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0); N_NIMCALL(Ropeobj180006*, getrecorddesc_536643_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0, Intset270030* check0); N_NIMCALL(Ropeobj180006*, getrecordfields_536636_839829468)(Tcgen531027* m0, Ttype294840* typ0, Intset270030* check0); 
N_NIMCALL(Ropeobj180006*, genrecordfieldsaux_536421_839829468)(Tcgen531027* m0, Tnode294802* n0, Ropeobj180006* accessexpr0, Ttype294840* rectype0, Intset270030* check0); N_NIMCALL(NI, sonslen_297351_850551059)(Tnode294802* n0); N_NIMCALL(Tnode294802*, lastson_297364_850551059)(Tnode294802* n0); N_NIMCALL(Ropeobj180006*, HEX26_180452_2381377266)(NimStringDesc* a0, Ropeobj180006* b0); N_NIMCALL(Ropeobj180006*, manglerecfieldname_536361_839829468)(Tsym294834* field0, Ttype294840* rectype0); N_NIMCALL(NimStringDesc*, manglefield_534973_839829468)(Tident201010* name0); N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0); N_NIMCALL(Ropeobj180006*, gettupledesc_536777_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0, Intset270030* check0); N_NIMCALL(NI, sonslen_297327_850551059)(Ttype294840* n0); N_NIMCALL(void, excl_270841_2627731572)(Intset270030* s0, NI key0); static N_INLINE(NIM_BOOL, iscompiletimeonly_330706_3876443242)(Ttype294840* t0); N_NIMCALL(Tstorageloc294812, paramstorageloc_536098_839829468)(Tsym294834* param0); N_NIMCALL(NIM_BOOL, ccgintroducedptr_535609_839829468)(Tsym294834* s0); N_NIMCALL(Tctypekind531007, mapreturntype_535445_839829468)(Ttype294840* typ0); N_NIMCALL(Tnode294802*, easyresultasgn_562191_839829468)(Tnode294802* n0); static N_INLINE(Tnode294802*, HEX5BHEX5D_295238_850551059)(Tnode294802* n0, NI i0); N_NIMCALL(Tnode294802*, getbody_337227_1724185294)(Tsym294834* s0); N_NIMCALL(Ropeobj180006*, localvardecl_540532_839829468)(Tcproc531021* p0, Tsym294834* s0); N_NIMCALL(Ropeobj180006*, gettypedesc_537671_839829468)(Tcgen531027* m0, Ttype294840* typ0); N_NIMCALL(void, initlocexprsingleuse_541289_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* result0); N_NIMCALL(void, initloc_534273_839829468)(Tloc294816* result0, Tlockind294808 k0, Ttype294840* typ0, Tstorageloc294812 s0); N_NIMCALL(void, linefmt_534714_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0); 
static N_INLINE(Ropeobj180006**, s_531179_3723162438)(Tcproc531021* p0, Tcprocsection531011 s0); N_NIMCALL(Ropeobj180006*, indentline_534656_839829468)(Tcproc531021* p0, Ropeobj180006* r0); N_NIMCALL(void, prepend_180893_2381377266)(Ropeobj180006** a0, Ropeobj180006* b0); N_NIMCALL(Ropeobj180006*, rdloc_540188_839829468)(Tloc294816 a0); N_NIMCALL(void, assignlocalvar_540614_839829468)(Tcproc531021* p0, Tsym294834* s0); N_NIMCALL(void, line_534690_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, Ropeobj180006* r0); N_NIMCALL(void, localdebuginfo_540449_839829468)(Tcproc531021* p0, Tsym294834* s0); N_NIMCALL(void, linef_534700_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0); N_NIMCALL(Ropeobj180006*, makecstring_193638_155036129)(NimStringDesc* s0); N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0); N_NIMCALL(Ropeobj180006*, gentypeinfo_537941_839829468)(Tcgen531027* m0, Ttype294840* t_537944_839829468); N_NIMCALL(Tcgen531027*, bmod_531201_3723162438)(Tsym294834* module0); N_NIMCALL(void, gentypeinfoauxbase_537960_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0, Ropeobj180006* base0); N_NIMCALL(NIM_BOOL, canformacycle_322123_3876443242)(Ttype294840* typ0); N_NIMCALL(void, gentupleinfo_538549_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0); N_NIMCALL(Ropeobj180006*, getnimnode_537945_839829468)(Tcgen531027* m0); N_NIMCALL(Ttype294840*, fakeclosuretype_539010_839829468)(Tsym294834* owner0); N_NIMCALL(Ttype294840*, newtype_297107_850551059)(Ttypekind294244 kind0, Tsym294834* owner0); N_NIMCALL(void, rawaddson_298394_850551059)(Ttype294840* father0, Ttype294840* son0); N_NIMCALL(void, gentypeinfoaux_538027_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0); N_NIMCALL(Ropeobj180006*, gentraverseproc_539632_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttypeinforeason539016 reason0); 
N_NIMCALL(void, gentraverseprocseq_539399_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Ttype294840* typ0); N_NIMCALL(void, gettemp_539032_839829468)(Tcproc531021* p0, Ttype294840* t0, Tloc294816* result0, NIM_BOOL needsinit0); N_NIMCALL(void, constructloc_540388_839829468)(Tcproc531021* p0, Tloc294816 loc0, NIM_BOOL istemp0); static N_INLINE(NIM_BOOL, iscomplexvaluetype_540317_839829468)(Ttype294840* t0); N_NIMCALL(void, usestringh_534345_839829468)(Tcgen531027* m0); N_NIMCALL(Ropeobj180006*, addrloc_540204_839829468)(Tloc294816 a0); N_NIMCALL(void, genobjectinit_540242_839829468)(Tcproc531021* p0, Tcprocsection531011 section0, Ttype294840* t0, Tloc294816 a0, NIM_BOOL takeaddr0); N_NIMCALL(Ttypefieldresult322145, analyseobjectwithtypefield_322149_3876443242)(Ttype294840* t0); N_NIMCALL(Ttype294840*, getsystype_340150_3937434831)(Ttypekind294244 kind0); N_NIMCALL(void, gentraverseproc_539022_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Ttype294840* typ_539027_839829468); static N_INLINE(Ropeobj180006*, parentobj_539257_839829468)(Ropeobj180006* accessor0, Tcgen531027* m0); N_NIMCALL(void, gentraverseproc_539039_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Tnode294802* n0); N_NIMCALL(void, gencaserange_539028_839829468)(Tcproc531021* p0, Tnode294802* branch0); N_NIMCALL(Ropeobj180006*, genliteral_541273_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(Ropeobj180006*, genliteral_551476_839829468)(Tcproc531021* p0, Tnode294802* n0, Ttype294840* ty0); N_NIMCALL(Ropeobj180006*, intliteral_541270_839829468)(NI64 i0); N_NIMCALL(Ropeobj180006*, int64literal_551430_839829468)(NI64 i0); N_NIMCALL(Ropeobj180006*, uint64literal_551442_839829468)(NU64 i0); N_NIMCALL(NI, nodetabletestorset_344682_1142335848)(Tnodetable294862* t0, Tnode294802* key0, NI val0); N_NIMCALL(Ropeobj180006*, getstrlit_551468_839829468)(Tcgen531027* m0, NimStringDesc* s0); N_NIMCALL(NimStringDesc*, 
tostrmaxprecision_300007_3471544153)(NF f0); N_NIMCALL(Tnode294802*, copynode_298528_850551059)(Tnode294802* src0); N_NIMCALL(void, linecg_534707_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0); N_NIMCALL(void, genarrayinfo_539005_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0); N_NIMCALL(void, gensetinfo_538867_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0); N_NIMCALL(void, genenuminfo_538597_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0); N_NIMCALL(void, genobjectinfo_538506_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0); N_NIMCALL(void, genobjectfields_538104_839829468)(Tcgen531027* m0, Ttype294840* typ0, Tnode294802* n0, Ropeobj180006* expr0); N_NIMCALL(Ropeobj180006*, discriminatortablename_538057_839829468)(Tcgen531027* m0, Ttype294840* objtype_538060_839829468, Tsym294834* d0); N_NIMCALL(Tsym294834*, lookupinrecord_301119_2984716966)(Tnode294802* n0, Tident201010* field0); N_NIMCALL(NI64, getordvalue_322129_3876443242)(Tnode294802* n0); N_NIMCALL(void, gendeepcopyproc_540066_839829468)(Tcgen531027* m0, Tsym294834* s0, Ropeobj180006* result0); N_NIMCALL(void, initlocalvar_540398_839829468)(Tcproc531021* p0, Tsym294834* v0, NIM_BOOL immediateasgn0); N_NIMCALL(void, fillresult_535865_839829468)(Tsym294834* param0); N_NIMCALL(void, assignparam_540994_839829468)(Tcproc531021* p0, Tsym294834* s0); N_NIMCALL(void, closuresetup_562158_839829468)(Tcproc531021* p0, Tsym294834* prc0); N_NIMCALL(Ropeobj180006*, initgcframe_540435_839829468)(Tcproc531021* p0); N_NIMCALL(Ropeobj180006*, initframe_562140_839829468)(Tcproc531021* p0, Ropeobj180006* procname0, Ropeobj180006* filename0); N_NIMCALL(Ropeobj180006*, quotedfilename_198818_155036129)(Tlineinfo193336 i0); N_NIMCALL(void, appcg_534648_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI 
args0Len0); N_NIMCALL(Ropeobj180006*, deinitgcframe_540441_839829468)(Tcproc531021* p0); N_NIMCALL(Ropeobj180006*, deinitframe_562150_839829468)(Tcproc531021* p0); N_NIMCALL(Tcgen531027*, findpendingmodule_534241_839829468)(Tcgen531027* m0, Tsym294834* s0); N_NIMCALL(void, symindynamiclib_561929_839829468)(Tcgen531027* m0, Tsym294834* sym0); N_NIMCALL(NIM_BOOL, isgetprocaddr_561442_839829468)(Tlib294820* lib0); N_NIMCALL(void, loaddynamiclib_561480_839829468)(Tcgen531027* m0, Tlib294820* lib0); N_NIMCALL(void, libcandidates_172605_2607990831)(NimStringDesc* s0, TY136002** dest0); N_NIMCALL(void, rawmessage_196612_155036129)(Tmsgkind193002 msg0, NimStringDesc* arg0); N_NIMCALL(void, initlocexpr_541283_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* result0); N_NIMCALL(Ropeobj180006*, mangledynlibproc_540816_839829468)(Tsym294834* sym0); N_NIMCALL(NimStringDesc*, HEX24_180856_2381377266)(Ropeobj180006* r0); N_NIMCALL(void, symindynamiclibpartial_562071_839829468)(Tcgen531027* m0, Tsym294834* sym0); N_NIMCALL(void, genvarprototype_541236_839829468)(Tcgen531027* m0, Tsym294834* sym0); N_NIMCALL(void, genvarprototypeaux_546254_839829468)(Tcgen531027* m0, Tsym294834* sym0); N_NIMCALL(void, declarethreadvar_540676_839829468)(Tcgen531027* m0, Tsym294834* s0, NIM_BOOL isextern0); static N_INLINE(NIM_BOOL, emulatedthreadvars_534949_839829468)(void); static N_INLINE(NIM_BOOL, crossescppboundary_562754_839829468)(Tcgen531027* m0, Tsym294834* sym0); N_NIMCALL(void, putlocintodest_541258_839829468)(Tcproc531021* p0, Tloc294816* d0, Tloc294816 s0); N_NIMCALL(void, genassignment_541264_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0); N_NIMCALL(void, genrefassign_540311_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0); static N_INLINE(NIM_BOOL, usesnativegc_171177_2607990831)(void); N_NIMCALL(void, optasgnloc_551788_839829468)(Tloc294816 a0, Ttype294840* t0, Ropeobj180006* 
field0, Tloc294816* Result); N_NIMCALL(void, genoptasgntuple_552001_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0); N_NIMCALL(void, gengenericasgn_552167_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0); N_NIMCALL(NI, asgncomplexity_551750_839829468)(Tnode294802* n0); N_NIMCALL(void, genoptasgnobject_552084_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0, Tnode294802* t0); N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0); N_NIMCALL(void, localerror_198085_155036129)(Tlineinfo193336 info0, NimStringDesc* arg0); N_NIMCALL(NIM_BOOL, issimpleconst_534311_839829468)(Ttype294840* typ0); N_NIMCALL(void, putintodest_552468_839829468)(Tcproc531021* p0, Tloc294816* d0, Ttype294840* t0, Ropeobj180006* r0, Tstorageloc294812 s0); N_NIMCALL(void, gencomplexconst_560249_839829468)(Tcproc531021* p0, Tsym294834* sym0, Tloc294816* d0); N_NIMCALL(void, requestconstimpl_541240_839829468)(Tcproc531021* p0, Tsym294834* sym0); N_NIMCALL(Ropeobj180006*, genconstexpr_556849_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, tobitset_342001_452470228)(Tnode294802* s0, Tbitset341004** b0); N_NIMCALL(Ropeobj180006*, genrawsetdata_551629_839829468)(Tbitset341004* cs0, NI size0); N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0); N_NIMCALL(NI64, bitsettoword_551578_839829468)(Tbitset341004* s0, NI size0); N_NIMCALL(Ropeobj180006*, genconstseq_561371_839829468)(Tcproc531021* p0, Tnode294802* n0, Ttype294840* t0); N_NIMCALL(void, appcg_534640_839829468)(Tcgen531027* m0, Tcfilesection531005 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0); N_NIMCALL(Ropeobj180006*, genconstsimplelist_561299_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(Ropeobj180006*, gennamedconstexpr_561284_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, accessthreadlocalvar_534945_839829468)(Tcproc531021* 
p0, Tsym294834* s0); static N_INLINE(Ropeobj180006**, procsec_531194_3723162438)(Tcproc531021* p0, Tcprocsection531011 s0); static N_INLINE(NIM_BOOL, isemptytype_299440_850551059)(Ttype294840* t0); N_NIMCALL(void, putdataintodest_552436_839829468)(Tcproc531021* p0, Tloc294816* d0, Ttype294840* t0, Ropeobj180006* r0); N_NIMCALL(void, genlinedir_534823_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(Ropeobj180006*, sourceline_194068_155036129)(Tlineinfo193336 i0); N_NIMCALL(NIM_BOOL, freshlineinfo_534818_839829468)(Tcproc531021* p0, Tlineinfo193336 info0); N_NIMCALL(void, genmagicexpr_559033_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0); N_NIMCALL(void, genandor_556311_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0); N_NIMCALL(Ropeobj180006*, getlabel_541217_839829468)(Tcproc531021* p0); N_NIMCALL(void, fixlabel_541230_839829468)(Tcproc531021* p0, Ropeobj180006* labl0); N_NIMCALL(void, unaryarith_554646_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0); N_NIMCALL(void, unaryarithoverflow_553633_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0); N_NIMCALL(void, binaryfloatarith_558728_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0); N_NIMCALL(void, binaryarith_553819_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0); N_NIMCALL(void, geneqproc_554214_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, binaryarithoverflow_553262_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0); N_NIMCALL(Ropeobj180006*, binaryarithoverflowraw_553235_839829468)(Tcproc531021* p0, Ttype294840* t0, Tloc294816 a0, Tloc294816 b0, NimStringDesc* frmt0); N_NIMCALL(Ropeobj180006*, rdcharloc_540227_839829468)(Tloc294816 a0); N_NIMCALL(NI64, lastord_322004_3876443242)(Ttype294840* t0); N_NIMCALL(void, genrepr_557339_839829468)(Tcproc531021* p0, 
Tnode294802* e0, Tloc294816* d0); N_NIMCALL(Ropeobj180006*, lenfield_541305_839829468)(Tcproc531021* p0); N_NIMCALL(void, gcusage_556439_839829468)(Tnode294802* n0); N_NIMCALL(void, message_198095_155036129)(Tlineinfo193336 info0, Tmsgkind193002 msg0, NimStringDesc* arg0); N_NIMCALL(NimStringDesc*, rendertree_313044_382274130)(Tnode294802* n0, Trenderflag313004Set renderflags0); N_NIMCALL(void, gengettypeinfo_557383_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, genswap_557638_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, unaryexpr_553209_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(void, binarystmt_552501_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genstrconcat_556452_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, genstrappend_556554_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, genseqelemappend_556683_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, genstrequals_558666_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, binaryexpr_552549_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genisnil_554620_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, gendollar_557391_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genof_557331_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, genof_557201_839829468)(Tcproc531021* p0, Tnode294802* x0, Ttype294840* typ0, Tloc294816* d0); N_NIMCALL(void, globalerror_198071_155036129)(Tlineinfo193336 info0, Tmsgkind193002 msg0, NimStringDesc* arg0); N_NIMCALL(Ropeobj180006*, genofhelper_557139_839829468)(Tcproc531021* p0, Ttype294840* dest0, Ropeobj180006* a0); N_NIMCALL(void, 
gennew_556782_839829468)(Tcproc531021* p0, Tnode294802* e0); N_NIMCALL(void, rawgennew_556741_839829468)(Tcproc531021* p0, Tloc294816 a0, Ropeobj180006* sizeexpr_556745_839829468); N_NIMCALL(void, gennewfinalize_557110_839829468)(Tcproc531021* p0, Tnode294802* e0); N_NIMCALL(void, gennewseq_556824_839829468)(Tcproc531021* p0, Tnode294802* e0); N_NIMCALL(void, gennewseqaux_556795_839829468)(Tcproc531021* p0, Tloc294816 dest0, Ropeobj180006* length0); N_NIMCALL(void, gennewseqofcap_556836_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, gensomecast_558480_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(Ropeobj180006*, getclosuretype_537683_839829468)(Tcgen531027* m0, Ttype294840* t0, Tclosuretypekind537679 kind0); N_NIMCALL(void, genord_558474_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, unaryexprchar_553222_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genarraylen_557415_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0); N_NIMCALL(void, unarystmt_552527_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(void, gensetlengthstr_557632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, gensetlengthseq_557500_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, gensetop_558419_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0); N_NIMCALL(void, binarystmtinexcl_557857_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(Ropeobj180006*, rdsetelemloc_557662_839829468)(Tloc294816 a0, Ttype294840* settype0); N_NIMCALL(void, binaryexprchar_552809_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(void, geninop_558009_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); 
N_NIMCALL(NIM_BOOL, fewcmps_557803_839829468)(Tnode294802* s0); N_NIMCALL(void, geninexpraux_555496_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* a0, Tloc294816* b0, Tloc294816* d0); N_NIMCALL(void, binaryexprin_557837_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* a0, Tloc294816* b0, Tloc294816* d0, NimStringDesc* frmt0); N_NIMCALL(void, gencall_545632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, genclosurecall_542452_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0); N_NIMCALL(Ropeobj180006*, genarg_541787_839829468)(Tcproc531021* p0, Tnode294802* n_541790_839829468, Tsym294834* param0, Tnode294802* call0); static N_INLINE(Ropeobj180006*, genargstringtocstring_541776_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(Ropeobj180006*, openarrayloc_541665_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(Tnode294802*, skipconv_330882_3876443242)(Tnode294802* n0); N_NIMCALL(Tmagic294524, getmagic_320502_2616423590)(Tnode294802* op0); N_NIMCALL(Ropeobj180006*, genargnoparam_541938_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(Ropeobj180006*, getrawproctype_542459_839829468)(Tcproc531021* p0, Ttype294840* t0); N_NIMCALL(NIM_BOOL, leftappearsonrightside_541329_839829468)(Tnode294802* le0, Tnode294802* ri0); N_NIMCALL(Tanalysisresult475003, ispartof_475340_788060399)(Tnode294802* a0, Tnode294802* b0); static N_INLINE(NIM_BOOL, hasnoinit_541383_839829468)(Tnode294802* call0); N_NIMCALL(void, resetloc_540350_839829468)(Tcproc531021* p0, Tloc294816* loc0); N_NIMCALL(Ropeobj180006*, addcomma_542464_839829468)(Ropeobj180006* r0); N_NIMCALL(void, geninfixcall_543929_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0); N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0); N_NIMCALL(Ropeobj180006*, genpatterncall_543699_839829468)(Tcproc531021* p0, Tnode294802* ri_543702_839829468, NimStringDesc* pat0, 
Ttype294840* typ_543704_839829468); N_NIMCALL(Ropeobj180006*, genotherarg_541277_839829468)(Tcproc531021* p0, Tnode294802* ri0, NI i0, Ttype294840* typ0); N_NIMCALL(Ropeobj180006*, genthisarg_543475_839829468)(Tcproc531021* p0, Tnode294802* ri_543478_839829468, NI i0, Ttype294840* typ0); N_NIMCALL(Tnode294802*, skipaddrderef_543433_839829468)(Tnode294802* node0); N_NIMCALL(void, fixupcall_541410_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0, Ropeobj180006* callee0, Ropeobj180006* params0); N_NIMCALL(void, gennamedparamcall_544616_839829468)(Tcproc531021* p0, Tnode294802* ri0, Tloc294816* d0); N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0); N_NIMCALL(void, genprefixcall_541960_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0); static N_INLINE(void, poststmtactions_534942_839829468)(Tcproc531021* p0); N_NIMCALL(void, genreset_556731_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, genecho_556369_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0); N_NIMCALL(void, genarrtoseq_557046_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0); N_NIMCALL(void, genseqconstr_557004_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0); N_NIMCALL(void, localerror_198080_155036129)(Tlineinfo193336 info0, Tmsgkind193002 msg0, NimStringDesc* arg0); N_NIMCALL(Tnode294802*, wrapprocforspawn_437501_2218250499)(Tsym294834* owner0, Tnode294802* spawnexpr0, Ttype294840* rettype0, Tnode294802* barrier0, Tnode294802* dest0); N_NIMCALL(Tnode294802*, liftparallel_480822_1773027539)(Tsym294834* owner0, Tnode294802* n0); N_NIMCALL(void, gendeepcopy_552374_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0); N_NIMCALL(NIM_BOOL, isdeepconstexpr_320566_2616423590)(Tnode294802* n0); N_NIMCALL(Ropeobj180006*, gensetnode_551664_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, 
gensetconstr_559496_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0); N_NIMCALL(void, exprcomplexconst_560684_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, genarrayconstr_560207_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(NIM_BOOL, handleconstexpr_556853_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, gentupleconstr_559618_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, genobjconstr_556903_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(Tsym294834*, lookupfieldagain_555153_839829468)(Tcproc531021* p0, Ttype294840* ty_555156_839829468, Tsym294834* field0, Ropeobj180006** r0); N_NIMCALL(void, genfieldcheck_555504_839829468)(Tcproc531021* p0, Tnode294802* e0, Ropeobj180006* obj0, Tsym294834* field0, Ttype294840* origty0); N_NIMCALL(Tnode294802*, newstrnode_295678_850551059)(Tnodekind294020 kind0, NimStringDesc* strval0); N_NIMCALL(void, gencast_558537_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, genconv_558632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(NIM_BOOL, comparetypes_328214_3876443242)(Ttype294840* x0, Ttype294840* y0, Tdistinctcompare326427 cmp0, Ttypecmpflag326429Set flags0); N_NIMCALL(void, genaddr_555051_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); static N_INLINE(NIM_BOOL, iscppref_554807_839829468)(Tcproc531021* p0, Ttype294840* typ0); N_NIMCALL(void, genbracketexpr_556277_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, genarrayelem_556093_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0); N_NIMCALL(NIM_BOOL, isconstexpr_320510_2616423590)(Tnode294802* n0); N_NIMCALL(void, genopenarrayelem_556169_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0); N_NIMCALL(void, 
genseqelem_556205_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0); N_NIMCALL(void, gencstringelem_556144_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0); N_NIMCALL(void, gentupleelem_555124_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, genderef_545921_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NIM_BOOL enforcederef0); N_NIMCALL(void, genrecordfield_555448_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(Ttype294840*, genrecordfieldaux_555096_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tloc294816* a0); N_NIMCALL(void, gencheckedrecordfield_556046_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0); N_NIMCALL(void, genblock_548083_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(NI, startblock_545978_839829468)(Tcproc531021* p0, NimStringDesc* start0, Ropeobj180006** args0, NI args0Len0); N_NIMCALL(void, endblock_546060_839829468)(Tcproc531021* p0); N_NIMCALL(void, endblock_546035_839829468)(Tcproc531021* p0, Ropeobj180006* blockend0); N_NIMCALL(Ropeobj180006*, blockbody_546025_839829468)(Tblock531019* b0); N_NIMCALL(void, genstmtlistexpr_560402_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, genif_546982_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, downconv_560581_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(NI, inheritancediff_328252_3876443242)(Ttype294840* a0, Ttype294840* b0); N_NIMCALL(void, upconv_560431_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, genrangechck_558590_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0, NimStringDesc* magic0); N_NIMCALL(void, convstrtocstr_558642_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, convcstrtostr_558654_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); 
N_NIMCALL(void, genclosure_559836_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); static N_INLINE(NIM_BOOL, isconstclosure_559810_839829468)(Tnode294802* n0); static N_INLINE(NIM_BOOL, isroutine_299323_850551059)(Tsym294834* s0); N_NIMCALL(void, genwhilestmt_547984_839829468)(Tcproc531021* p0, Tnode294802* t0); static N_INLINE(Ropeobj180006*, assignlabel_546020_839829468)(Tblock531019* b0); N_NIMCALL(NIM_BOOL, stmtscontainpragma_530083_2036603609)(Tnode294802* n0, Tspecialword277003 w0); N_NIMCALL(void, gencomputedgoto_547744_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, genvarstmt_546854_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, gensinglevar_546276_839829468)(Tcproc531021* p0, Tnode294802* a0); N_NIMCALL(void, gengotovar_546258_839829468)(Tcproc531021* p0, Tnode294802* value0); N_NIMCALL(void, assignglobalvar_540819_839829468)(Tcproc531021* p0, Tsym294834* s0); N_NIMCALL(void, varindynamiclib_540812_839829468)(Tcgen531027* m0, Tsym294834* sym0); N_NIMCALL(void, registergcroot_545762_839829468)(Tcproc531021* p0, Tsym294834* v0); N_NIMCALL(Ropeobj180006*, gentraverseprocforglobal_540032_839829468)(Tcgen531027* m0, Tsym294834* s0); static N_INLINE(NIM_BOOL, isassignedimmediately_545781_839829468)(Tnode294802* n0); N_NIMCALL(NIM_BOOL, containshiddenpointer_322120_3876443242)(Ttype294840* typ0); static N_INLINE(void, loadinto_545928_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* a0); N_NIMCALL(void, genasgncall_545695_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0); N_NIMCALL(void, genclosurevar_546832_839829468)(Tcproc531021* p0, Tnode294802* a0); N_NIMCALL(void, genvartuple_545794_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(Tnode294802*, lowertupleunpacking_435037_2218250499)(Tnode294802* n0, Tsym294834* owner0); N_NIMCALL(void, genconststmt_546909_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(NIM_BOOL, 
containscompiletimeonly_330721_3876443242)(Ttype294840* t0); static N_INLINE(NIM_BOOL, emitlazily_534248_839829468)(Tsym294834* s0); N_NIMCALL(void, gencase_549826_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0); N_NIMCALL(void, genstringcase_549416_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0); N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0); N_NIMCALL(void, gencasestringbranch_549100_839829468)(Tcproc531021* p0, Tnode294802* b0, Tloc294816 e0, Ropeobj180006* labl0, Ropeobj180006** branches0, NI branches0Len0); N_NIMCALL(NI64, hashstring_530100_2036603609)(NimStringDesc* s0); N_NIMCALL(Ropeobj180006*, gencasesecondpass_548965_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NI labid0, NI until0); N_NIMCALL(void, exprblock_546103_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(void, gencasegeneric_549087_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0); N_NIMCALL(Ropeobj180006*, genifforcaseuntil_549021_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc294816 a0); N_NIMCALL(void, gencasegenericbranch_548910_839829468)(Tcproc531021* p0, Tnode294802* b0, Tloc294816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj180006* labl0); N_NIMCALL(void, gengotoforcase_547673_839829468)(Tcproc531021* p0, Tnode294802* casestmt0); N_NIMCALL(void, genordinalcase_549724_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0); N_NIMCALL(NI, ifswitchsplitpoint_549615_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(NIM_BOOL, branchhastoobigrange_549575_839829468)(Tnode294802* b0); N_NIMCALL(void, genreturnstmt_547617_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(void, blockleaveactions_547442_839829468)(Tcproc531021* p0, NI howmanytrys0, NI howmanyexcepts0); static N_INLINE(Tnode294802*, 
pop_320246_1689653243)(Tnodeseq294796** s0); N_NIMCALL(void, genbreakstmt_548444_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(void, genasgn_551239_839829468)(Tcproc531021* p0, Tnode294802* e0, NIM_BOOL fastasgn0); N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_551080_839829468)(Tcproc531021* p0, Tnode294802* asgn0); N_NIMCALL(void, asgnfielddiscriminant_551209_839829468)(Tcproc531021* p0, Tnode294802* e0); N_NIMCALL(void, gendiscriminantcheck_551144_839829468)(Tcproc531021* p0, Tloc294816 a0, Tloc294816 tmp0, Ttype294840* objtype0, Tsym294834* field0); N_NIMCALL(Ropeobj180006*, discriminatortabledecl_538094_839829468)(Tcgen531027* m0, Ttype294840* objtype0, Tsym294834* d0); N_NIMCALL(void, genasmstmt_550659_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(Ropeobj180006*, genasmoremitstmt_550529_839829468)(Tcproc531021* p0, Tnode294802* t0, NIM_BOOL isasmstmt0); N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0); N_NIMCALL(void, gentrycpp_549865_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0); static N_INLINE(void, gensimpleblock_546095_839829468)(Tcproc531021* p0, Tnode294802* stmts0); N_NIMCALL(void, gentry_550114_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0); N_NIMCALL(NIM_BOOL, isdefined_202011_1967573533)(NimStringDesc* symbol0); N_NIMCALL(void, line_534695_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* r0); static N_INLINE(Ropeobj180006*, pop_180530_1689653243)(TY193350** s0); N_NIMCALL(void, genraisestmt_548828_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(NimStringDesc*, getraisefrmt_548824_839829468)(Tcproc531021* p0); N_NIMCALL(void, gentypesection_540184_839829468)(Tcgen531027* m0, Tnode294802* n0); N_NIMCALL(void, genpragma_551039_839829468)(Tcproc531021* p_551041_839829468, Tnode294802* n0); N_NIMCALL(Tspecialword277003, whichpragma_320911_2616423590)(Tnode294802* n0); N_NIMCALL(void, genemit_550839_839829468)(Tcproc531021* p0, Tnode294802* t0); 
N_NIMCALL(Tcfilesection531005, determinesection_550819_839829468)(Tnode294802* n0); N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0); N_NIMCALL(void, genbreakpoint_550862_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(void, genwatchpoint_551016_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(Tsym294834*, skipgenericowner_299279_850551059)(Tsym294834* s0); N_NIMCALL(void, genparforstmt_548208_839829468)(Tcproc531021* p0, Tnode294802* t0); N_NIMCALL(void, genstate_546117_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, gengotostate_546144_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, genbreakstate_546229_839829468)(Tcproc531021* p0, Tnode294802* n0); N_NIMCALL(void, registermoduletomain_564243_839829468)(Tsym294834* m0); N_NIMCALL(Ropeobj180006*, getinitname_564235_839829468)(Tsym294834* m0); N_NIMCALL(Ropeobj180006*, getsomeinitname_563904_839829468)(Tsym294834* m0, NimStringDesc* suffix0); N_NIMCALL(Ropeobj180006*, getdatinitname_564239_839829468)(Tsym294834* m0); N_NIMCALL(Tnode294802*, generatemethoddispatchers_434151_3853300031)(void); N_NIMCALL(void, genmainproc_563729_839829468)(Tcgen531027* m0); N_NIMCALL(Ropeobj180006*, genfilenames_563688_839829468)(Tcgen531027* m0); N_NIMCALL(void, finishmodule_565420_839829468)(Tcgen531027* m0); N_NIMCALL(void, updatecachedmodule_565813_839829468)(Tcgen531027* m0); N_NIMCALL(NIM_BOOL, mergerequired_532832_2760143328)(Tcgen531027* m0); N_NIMCALL(void, mergefiles_533241_2760143328)(NimStringDesc* cfilename0, Tcgen531027* m0); N_NIMCALL(void, geninitcode_564286_839829468)(Tcgen531027* m0); N_NIMCALL(Ropeobj180006*, gensectionstart_532081_2760143328)(Tcprocsection531011 ps0); N_NIMCALL(Ropeobj180006*, gensectionend_532116_2760143328)(Tcprocsection531011 ps0); N_NIMCALL(Ropeobj180006*, gensectionstart_532015_2760143328)(Tcfilesection531005 fs0); N_NIMCALL(Ropeobj180006*, gensectionend_532050_2760143328)(Tcfilesection531005 fs0); N_NIMCALL(void, 
finishtypedescriptions_537842_839829468)(Tcgen531027* m0); N_NIMCALL(Ropeobj180006*, genmodule_564491_839829468)(Tcgen531027* m0, NimStringDesc* cfile0); N_NIMCALL(Ropeobj180006*, getfileheader_563683_839829468)(NimStringDesc* cfile0); N_NIMCALL(Ropeobj180006*, getcopyright_563665_839829468)(NimStringDesc* cfile0); N_NIMCALL(NimStringDesc*, getcompilecfilecmd_276284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0); static N_INLINE(void, addinttypes_563659_839829468)(Ropeobj180006** result0); N_NIMCALL(Ropeobj180006*, genmergeinfo_532203_2760143328)(Tcgen531027* m0); N_NIMCALL(void, generatethreadlocalstorage_540717_839829468)(Tcgen531027* m0); N_NIMCALL(void, generateheaders_562104_839829468)(Tcgen531027* m0); N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0); N_NIMCALL(void, writerope_180836_2381377266)(Ropeobj180006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0); N_NIMCALL(void, addfiletocompile_275863_2528170400)(NimStringDesc* filename0); N_NIMCALL(void, addfiletolink_275872_2528170400)(NimStringDesc* filename0); N_NIMCALL(void, writemodule_565637_839829468)(Tcgen531027* m0, NIM_BOOL pending0); N_NIMCALL(void, generatethreadvarssize_540771_839829468)(Tcgen531027* m0); N_NIMCALL(NIM_BOOL, shouldrecompile_565621_839829468)(Ropeobj180006* code0, NimStringDesc* cfile0); N_NIMCALL(NimStringDesc*, toobjfile_275859_2528170400)(NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, writeropeifnotequal_181511_2381377266)(Ropeobj180006* r0, NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0); N_NIMCALL(void, writemapping_276789_2528170400)(Ropeobj180006* gsymbolmapping0); N_NIMCALL(void, writeheader_565152_839829468)(Tcgen531027* m0); N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY129506* Result); N_NIMCALL(void, resetmodule_564763_839829468)(Tcgen531027* m0); N_NIMCALL(void, 
nullify_564833_839829468)(Ropeobj180006** arr0); N_NIMCALL(void, nullify_564858_839829468)(Ropeobj180006** arr0); STRING_LITERAL(T839829468_4, "\011", 1); STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17); NIM_CONST TY205018 T839829468_9 = {((NimStringDesc*) &T839829468_10), ((NI) 1158)} ; STRING_LITERAL(T839829468_11, "T", 1); STRING_LITERAL(T839829468_12, "_", 1); STRING_LITERAL(T839829468_13, "added pending module twice: ", 28); STRING_LITERAL(T839829468_14, ".h", 2); STRING_LITERAL(T839829468_15, ".cpp", 4); STRING_LITERAL(T839829468_16, ".m", 2); STRING_LITERAL(T839829468_17, ".c", 2); STRING_LITERAL(T839829468_18, "0", 1); STRING_LITERAL(T839829468_19, "$", 1); STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30); STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15); STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13); STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13); STRING_LITERAL(T839829468_24, "static ", 7); STRING_LITERAL(T839829468_25, "mapType", 7); STRING_LITERAL(T839829468_26, "void", 4); STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24); STRING_LITERAL(T839829468_28, "TY", 2); STRING_LITERAL(T839829468_29, "getTypeName: ", 13); STRING_LITERAL(T839829468_30, "void*", 5); STRING_LITERAL(T839829468_31, "NimStringDesc", 13); STRING_LITERAL(T839829468_32, "NimStringDesc*", 14); STRING_LITERAL(T839829468_33, "NCSTRING", 8); STRING_LITERAL(T839829468_34, "NIM_BOOL", 8); STRING_LITERAL(T839829468_35, "NIM_CHAR", 8); STRING_LITERAL(T839829468_36, "NI", 2); STRING_LITERAL(T839829468_37, "NI8", 3); STRING_LITERAL(T839829468_38, "NI16", 4); STRING_LITERAL(T839829468_39, "NI32", 4); STRING_LITERAL(T839829468_40, "NI64", 4); STRING_LITERAL(T839829468_41, "NF", 2); STRING_LITERAL(T839829468_42, "NF32", 4); STRING_LITERAL(T839829468_43, "NF64", 4); STRING_LITERAL(T839829468_44, "NF128", 5); STRING_LITERAL(T839829468_45, "NU", 2); STRING_LITERAL(T839829468_46, "NU8", 3); STRING_LITERAL(T839829468_47, "NU16", 4); 
STRING_LITERAL(T839829468_48, "NU32", 4); STRING_LITERAL(T839829468_49, "NU64", 4); NIM_CONST TY535943 Numericaltypetostr_535941_839829468 = {((NimStringDesc*) &T839829468_36), ((NimStringDesc*) &T839829468_37), ((NimStringDesc*) &T839829468_38), ((NimStringDesc*) &T839829468_39), ((NimStringDesc*) &T839829468_40), ((NimStringDesc*) &T839829468_41), ((NimStringDesc*) &T839829468_42), ((NimStringDesc*) &T839829468_43), ((NimStringDesc*) &T839829468_44), ((NimStringDesc*) &T839829468_45), ((NimStringDesc*) &T839829468_46), ((NimStringDesc*) &T839829468_47), ((NimStringDesc*) &T839829468_48), ((NimStringDesc*) &T839829468_49)} ; STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30); STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28); STRING_LITERAL(T839829468_52, "&", 1); STRING_LITERAL(T839829468_53, "*", 1); STRING_LITERAL(T839829468_54, "$1 $2;$n", 8); STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19); STRING_LITERAL(T839829468_56, "union", 5); STRING_LITERAL(T839829468_57, "struct", 6); STRING_LITERAL(T839829468_58, "getTypeForward(", 15); STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18); STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17); STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18); STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18); STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20); STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28); STRING_LITERAL(T839829468_65, "N_NIMCALL", 9); STRING_LITERAL(T839829468_66, "N_STDCALL", 9); STRING_LITERAL(T839829468_67, "N_CDECL", 7); STRING_LITERAL(T839829468_68, "N_SAFECALL", 10); STRING_LITERAL(T839829468_69, "N_SYSCALL", 9); STRING_LITERAL(T839829468_70, "N_INLINE", 8); STRING_LITERAL(T839829468_71, "N_NOINLINE", 10); STRING_LITERAL(T839829468_72, "N_FASTCALL", 10); STRING_LITERAL(T839829468_73, "N_CLOSURE", 9); STRING_LITERAL(T839829468_74, "N_NOCONV", 8); NIM_CONST TY294016 Callingconvtostr_535585_839829468 = {((NimStringDesc*) 
&T839829468_65), ((NimStringDesc*) &T839829468_66), ((NimStringDesc*) &T839829468_67), ((NimStringDesc*) &T839829468_68), ((NimStringDesc*) &T839829468_69), ((NimStringDesc*) &T839829468_70), ((NimStringDesc*) &T839829468_71), ((NimStringDesc*) &T839829468_72), ((NimStringDesc*) &T839829468_73), ((NimStringDesc*) &T839829468_74)} ; STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}" " $1;$n", 69); STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28); STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34); STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31); STRING_LITERAL(T839829468_79, "TGenericSeq", 11); STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20); STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39); STRING_LITERAL(T839829468_82, "<", 1); STRING_LITERAL(T839829468_83, " COMMA ", 7); STRING_LITERAL(T839829468_84, "> ", 2); extern NIM_CONST TY275427 Cc_275413_2528170400; STRING_LITERAL(T839829468_85, " {$n", 4); STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24); STRING_LITERAL(T839829468_87, " : public $1 {$n", 16); STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15); STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18); STRING_LITERAL(T839829468_90, "$1.$2", 5); STRING_LITERAL(T839829468_91, "S", 1); STRING_LITERAL(T839829468_92, "struct {", 8); STRING_LITERAL(T839829468_93, "} $1;$n", 7); STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38); STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17); STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18); STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23); STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11); STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20); STRING_LITERAL(T839829468_100, "char dummy;$n", 13); STRING_LITERAL(T839829468_101, "};", 2); STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9); STRING_LITERAL(T839829468_103, "$1 
Field$2;$n", 13); STRING_LITERAL(T839829468_104, "char dummy;", 11); STRING_LITERAL(T839829468_105, "Set", 3); STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18); STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21); STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15); STRING_LITERAL(T839829468_109, "genProcParams", 13); STRING_LITERAL(T839829468_110, ", ", 2); STRING_LITERAL(T839829468_111, " ", 1); STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12); STRING_LITERAL(T839829468_113, " Result", 7); STRING_LITERAL(T839829468_114, "void* ClEnv", 11); STRING_LITERAL(T839829468_115, "...", 3); STRING_LITERAL(T839829468_116, "void)", 5); STRING_LITERAL(T839829468_117, ")", 1); STRING_LITERAL(T839829468_118, "(", 1); STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12); STRING_LITERAL(T839829468_120, "proc has no result symbol", 25); STRING_LITERAL(T839829468_121, " register", 9); STRING_LITERAL(T839829468_122, " volatile", 9); STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10); STRING_LITERAL(T839829468_124, "(*$1)", 5); STRING_LITERAL(T839829468_125, ";", 1); STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name " "= $2;$n", 70); STRING_LITERAL(T839829468_127, "NTI$1", 5); STRING_LITERAL(T839829468_128, "(&", 2); STRING_LITERAL(T839829468_129, "TNimType", 8); STRING_LITERAL(T839829468_130, "TNimNode", 8); STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30); STRING_LITERAL(T839829468_132, "0", 1); STRING_LITERAL(T839829468_133, "void*", 5); STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53); STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16); STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23); STRING_LITERAL(T839829468_137, "genTypeInfo(", 12); STRING_LITERAL(T839829468_138, "$1[$2]", 6); STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26); STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15); STRING_LITERAL(T839829468_141, "$1.kind = 
1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$" "n$1.name = \"Field$3\";$n", 86); STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45); STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27); STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16); STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29); STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35); STRING_LITERAL(T839829468_147, "$1 a;$n", 7); STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12); STRING_LITERAL(T839829468_149, "LOC", 3); STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13); STRING_LITERAL(T839829468_151, "<string.h>", 10); STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35); STRING_LITERAL(T839829468_153, ".Sup", 4); STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17); STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22); STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35); STRING_LITERAL(T839829468_157, "len", 3); STRING_LITERAL(T839829468_158, "Sup.len", 7); STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31); STRING_LITERAL(T839829468_160, "}$n", 3); STRING_LITERAL(T839829468_161, "$1.Sup", 6); STRING_LITERAL(T839829468_162, "genTraverseProc", 15); STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18); STRING_LITERAL(T839829468_164, "case $1 ... 
$2:$n", 17); STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21); STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16); STRING_LITERAL(T839829468_167, "IL64($1)", 8); STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38); STRING_LITERAL(T839829468_169, "NIM_TRUE", 8); STRING_LITERAL(T839829468_170, "NIM_FALSE", 9); STRING_LITERAL(T839829468_171, "ULL", 3); STRING_LITERAL(T839829468_172, "(($1) $2)", 9); STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45); STRING_LITERAL(T839829468_174, "NIM_NIL", 7); STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27); STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23); STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29); STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25); STRING_LITERAL(T839829468_179, "genLiteral(", 11); STRING_LITERAL(T839829468_180, "case $1:$n", 10); STRING_LITERAL(T839829468_181, "default:$n", 10); STRING_LITERAL(T839829468_182, "break;$n", 8); STRING_LITERAL(T839829468_183, "} $n", 4); STRING_LITERAL(T839829468_184, "genTraverseProc()", 17); STRING_LITERAL(T839829468_185, "$1.Field$2", 10); STRING_LITERAL(T839829468_186, "$1.ClEnv", 8); STRING_LITERAL(T839829468_187, "$1->data[$2]", 12); STRING_LITERAL(T839829468_188, "a", 1); STRING_LITERAL(T839829468_189, "(*a)", 4); STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15); STRING_LITERAL(T839829468_191, "$1;$n", 5); STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17); STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43); STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17); STRING_LITERAL(T839829468_195, "NI $1;$n", 8); STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41); STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o" "ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127); STRING_LITERAL(T839829468_198, "$1.len 
= $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61); STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18); STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32); STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11); STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107); STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19); STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34); STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26); STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n", 74); STRING_LITERAL(T839829468_207, "genObjectFields", 15); STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49); STRING_LITERAL(T839829468_209, "\011return $1;$n", 13); STRING_LITERAL(T839829468_210, "Result", 6); STRING_LITERAL(T839829468_211, "closure generation failed", 25); STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18); STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21); STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18); STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19); STRING_LITERAL(T839829468_216, "$N$1 {$N", 8); STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22); STRING_LITERAL(T839829468_218, "nimFrame", 8); STRING_LITERAL(T839829468_219, "VarSlot", 7); STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25); STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16); STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17); STRING_LITERAL(T839829468_223, "{", 1); STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16); STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51); STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15); STRING_LITERAL(T839829468_227, "}$N", 3); STRING_LITERAL(T839829468_228, "static void* $1;$n", 18); 
STRING_LITERAL(T839829468_229, "||", 2); STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47); STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57); STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60); STRING_LITERAL(T839829468_233, "loadDynamicLib", 14); STRING_LITERAL(T839829468_234, "Dl_$1", 5); STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21); NIM_CONST TY205018 T839829468_236 = {((NimStringDesc*) &T839829468_10), ((NI) 535)} ; STRING_LITERAL(T839829468_237, "wrong index: ", 13); STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37); STRING_LITERAL(T839829468_239, "$2 $1;$n", 8); STRING_LITERAL(T839829468_240, "extern ", 7); STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14); STRING_LITERAL(T839829468_242, " $1;$n", 6); STRING_LITERAL(T839829468_243, "cgsym: ", 7); STRING_LITERAL(T839829468_244, ": ", 2); STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15); STRING_LITERAL(T839829468_246, "extern \"C\" ", 11); STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23); STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26); STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28); STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35); STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34); STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32); STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23); STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35); STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33); STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47); STRING_LITERAL(T839829468_257, ".", 1); STRING_LITERAL(T839829468_258, "ClEnv", 5); STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22); STRING_LITERAL(T839829468_260, "Field$1", 
7); STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53); STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50); STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43); STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21); NIM_CONST TY205018 T839829468_264 = {((NimStringDesc*) &T839829468_265), ((NI) 320)} ; STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60); STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63); STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45); STRING_LITERAL(T839829468_269, "genAssignment: ", 15); STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48); STRING_LITERAL(T839829468_271, "expr: proc not init ", 20); STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23); STRING_LITERAL(T839829468_273, "{$n", 3); STRING_LITERAL(T839829468_274, "0x$1,$n", 7); STRING_LITERAL(T839829468_275, "0x$1, ", 6); STRING_LITERAL(T839829468_276, "0x$1}$n", 7); STRING_LITERAL(T839829468_277, "{{$1, $1}", 9); STRING_LITERAL(T839829468_278, ", {", 3); STRING_LITERAL(T839829468_279, ",$n", 3); STRING_LITERAL(T839829468_280, "}", 1); STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 =" " $4;$n", 69); STRING_LITERAL(T839829468_282, "(($1)&$2)", 9); STRING_LITERAL(T839829468_283, "$1,$n", 5); STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25); STRING_LITERAL(T839829468_285, "expr: var not init ", 19); STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24); STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50); STRING_LITERAL(T839829468_288, "NimTV->", 7); STRING_LITERAL(T839829468_289, "expr: temp not init ", 20); STRING_LITERAL(T839829468_290, "expr: param not init ", 21); 
STRING_LITERAL(T839829468_291, "expr(", 5); STRING_LITERAL(T839829468_292, "); unknown symbol", 17); STRING_LITERAL(T839829468_293, "//", 2); STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16); STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16); STRING_LITERAL(T839829468_296, "LA", 2); STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18); STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21); STRING_LITERAL(T839829468_299, "$1: ;$n", 7); STRING_LITERAL(T839829468_300, "!($1)", 5); STRING_LITERAL(T839829468_301, "$1", 2); STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18); STRING_LITERAL(T839829468_303, "-($1)", 5); STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22); STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19); STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21); STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20); STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22); STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22); STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20); STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19); STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20); STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22); STRING_LITERAL(T839829468_314, "((double) ($1))", 15); STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18); STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18); NIM_CONST TY554655 unarithtab_554653_839829468 = {((NimStringDesc*) &T839829468_300), ((NimStringDesc*) &T839829468_301), ((NimStringDesc*) &T839829468_302), ((NimStringDesc*) &T839829468_301), ((NimStringDesc*) &T839829468_303), ((NimStringDesc*) &T839829468_304), ((NimStringDesc*) &T839829468_305), ((NimStringDesc*) &T839829468_306), ((NimStringDesc*) &T839829468_307), ((NimStringDesc*) &T839829468_308), ((NimStringDesc*) &T839829468_309), ((NimStringDesc*) &T839829468_310), ((NimStringDesc*) &T839829468_311), ((NimStringDesc*) &T839829468_312), ((NimStringDesc*) 
&T839829468_313), ((NimStringDesc*) &T839829468_314), ((NimStringDesc*) &T839829468_314), ((NimStringDesc*) &T839829468_315), ((NimStringDesc*) &T839829468_316)} ; STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33); STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13); NIM_CONST TY553642 opr_553640_839829468 = {((NimStringDesc*) &T839829468_318), ((NimStringDesc*) &T839829468_303), ((NimStringDesc*) &T839829468_304)} ; STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22); STRING_LITERAL(T839829468_320, "+", 1); STRING_LITERAL(T839829468_321, "-", 1); STRING_LITERAL(T839829468_322, "/", 1); NIM_CONST TY558764 opr_558762_839829468 = {((NimStringDesc*) &T839829468_320), ((NimStringDesc*) &T839829468_321), ((NimStringDesc*) &T839829468_53), ((NimStringDesc*) &T839829468_322)} ; STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16); STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16); STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21); STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21); STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21); STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21); STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30); STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30); STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13); STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13); STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13); STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22); STRING_LITERAL(T839829468_335, "(($1 >= $2) ? 
$1 : $2)", 22); STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29); STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29); STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29); STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29); STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29); STRING_LITERAL(T839829468_341, "($1 == $2)", 10); STRING_LITERAL(T839829468_342, "($1 <= $2)", 10); STRING_LITERAL(T839829468_343, "($1 < $2)", 9); STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26); STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25); STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26); STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25); STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24); STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24); STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23); STRING_LITERAL(T839829468_351, "($1 != $2)", 10); NIM_CONST TY553828 binarithtab_553826_839829468 = {((NimStringDesc*) &T839829468_325), ((NimStringDesc*) &T839829468_326), ((NimStringDesc*) &T839829468_327), ((NimStringDesc*) &T839829468_328), ((NimStringDesc*) &T839829468_329), ((NimStringDesc*) &T839829468_330), ((NimStringDesc*) &T839829468_331), ((NimStringDesc*) &T839829468_332), ((NimStringDesc*) &T839829468_333), ((NimStringDesc*) &T839829468_334), ((NimStringDesc*) &T839829468_335), ((NimStringDesc*) &T839829468_334), ((NimStringDesc*) &T839829468_335), ((NimStringDesc*) &T839829468_336), ((NimStringDesc*) &T839829468_337), ((NimStringDesc*) &T839829468_338), ((NimStringDesc*) &T839829468_339), ((NimStringDesc*) &T839829468_340), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_344), ((NimStringDesc*) &T839829468_345), 
((NimStringDesc*) &T839829468_346), ((NimStringDesc*) &T839829468_347), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_348), ((NimStringDesc*) &T839829468_349), ((NimStringDesc*) &T839829468_350), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_351)} ; STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46); STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13); STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13); STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13); STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13); STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13); NIM_CONST TY553281 opr_553279_839829468 = {((NimStringDesc*) &T839829468_353), ((NimStringDesc*) &T839829468_354), ((NimStringDesc*) &T839829468_355), ((NimStringDesc*) &T839829468_356), ((NimStringDesc*) &T839829468_357), ((NimStringDesc*) &T839829468_353), ((NimStringDesc*) &T839829468_354)} ; STRING_LITERAL(T839829468_358, "((NU8)($1))", 11); STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43); STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25); NIM_CONST TY553281 prc64_553274_839829468 = {((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361), ((NimStringDesc*) &T839829468_362), ((NimStringDesc*) &T839829468_363), ((NimStringDesc*) &T839829468_364), ((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361)} ; 
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23); STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23); STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23); STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23); STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23); NIM_CONST TY553281 prc_553269_839829468 = {((NimStringDesc*) &T839829468_365), ((NimStringDesc*) &T839829468_366), ((NimStringDesc*) &T839829468_367), ((NimStringDesc*) &T839829468_368), ((NimStringDesc*) &T839829468_369), ((NimStringDesc*) &T839829468_365), ((NimStringDesc*) &T839829468_366)} ; STRING_LITERAL(T839829468_370, "($#)($#)", 8); STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18); STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14); STRING_LITERAL(T839829468_373, "#reprBool($1)", 13); STRING_LITERAL(T839829468_374, "#reprChar($1)", 13); STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21); STRING_LITERAL(T839829468_376, "#reprStr($1)", 12); STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16); STRING_LITERAL(T839829468_378, "$1, $1Len0", 10); STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16); STRING_LITERAL(T839829468_380, "$1, $2", 6); STRING_LITERAL(T839829468_381, "genRepr()", 9); STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22); STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16); STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34); STRING_LITERAL(T839829468_385, "($1 - 1)", 8); STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14); STRING_LITERAL(T839829468_387, "binaryStmt", 10); STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11); STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11); NIM_CONST TY559052 opr_559050_839829468 = {((NimStringDesc*) &T839829468_388), ((NimStringDesc*) &T839829468_389)} ; NIM_CONST TY559052 fun64_559055_839829468 = {((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361)} ; NIM_CONST TY559052 fun_559060_839829468 = {((NimStringDesc*) 
&T839829468_365), ((NimStringDesc*) &T839829468_366)} ; STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22); STRING_LITERAL(T839829468_391, "$1->$2 + ", 9); STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24); STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27); STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24); STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31); STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47); STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39); STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16); STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11); STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23); STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18); STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26); STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25); STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13); STRING_LITERAL(T839829468_405, "$1 == 0", 7); STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16); STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18); STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17); STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17); STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18); STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17); STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43); STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14); STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15); STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17); STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25); STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34); STRING_LITERAL(T839829468_418, "($1)", 4); STRING_LITERAL(T839829468_419, "sizeof($1)", 10); STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26); STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, 
$3)", 23); STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20); STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28); STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28); STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23); STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20); STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27); STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16); STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13); STRING_LITERAL(T839829468_430, "(($1) ($2))", 11); STRING_LITERAL(T839829468_431, "($1Len0-1)", 10); STRING_LITERAL(T839829468_432, "$1Len0", 6); STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26); STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21); STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27); STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22); STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23); STRING_LITERAL(T839829468_438, "($1 ? 
$1->len : 0)", 18); STRING_LITERAL(T839829468_439, "genArrayLen()", 13); STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13); STRING_LITERAL(T839829468_441, "$1->len", 7); STRING_LITERAL(T839829468_442, "unaryStmt", 9); STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16); STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18); STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29); STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54); STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46); STRING_LITERAL(T839829468_448, "($1- $2)", 8); STRING_LITERAL(T839829468_449, "$1 |= ((", 8); STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19); STRING_LITERAL(T839829468_451, ")*8));$n", 8); STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10); STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23); STRING_LITERAL(T839829468_454, ")*8)));$n", 9); STRING_LITERAL(T839829468_455, "#countBits32($1)", 16); STRING_LITERAL(T839829468_456, "#countBits64($1)", 16); STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29); STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16); STRING_LITERAL(T839829468_459, "($1 & $2)", 9); STRING_LITERAL(T839829468_460, "($1 | $2)", 9); STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11); STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9); STRING_LITERAL(T839829468_463, "fewCmps", 7); STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20); STRING_LITERAL(T839829468_465, "$1 == $2", 8); STRING_LITERAL(T839829468_466, " || ", 4); STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30); STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31); STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31); STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36); STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43); STRING_LITERAL(T839829468_472, "genSetOp()", 10); 
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34); STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36); STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13); STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == " "0);$n if (!$3) break;}$n", 88); STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == " "0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);" "$n", 129); STRING_LITERAL(T839829468_478, "|", 1); STRING_LITERAL(T839829468_479, "& ~", 3); STRING_LITERAL(T839829468_480, "^", 1); NIM_CONST TY558428 lookupopr_558426_839829468 = {((NimStringDesc*) &T839829468_476), ((NimStringDesc*) &T839829468_477), ((NimStringDesc*) &T839829468_52), ((NimStringDesc*) &T839829468_478), ((NimStringDesc*) &T839829468_479), ((NimStringDesc*) &T839829468_480)} ; STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16); STRING_LITERAL(T839829468_482, ")==0)", 5); STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60); STRING_LITERAL(T839829468_484, "genSetOp", 8); STRING_LITERAL(T839829468_485, "$1->data", 8); STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22); STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29); STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26); STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14); STRING_LITERAL(T839829468_490, "", 0); STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22); STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20); STRING_LITERAL(T839829468_493, "$1.ClEnv? 
$1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51); STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9); STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22); STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31); STRING_LITERAL(T839829468_497, ";$n", 3); STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21); NIM_CONST TY205018 T839829468_498 = {((NimStringDesc*) &T839829468_499), ((NI) 423)} ; static NIM_CONST char136Set T839829468_500 = { 0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} ; STRING_LITERAL(T839829468_501, "wrong argument count", 20); STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40); NIM_CONST TY205018 T839829468_503 = {((NimStringDesc*) &T839829468_499), ((NI) 328)} ; STRING_LITERAL(T839829468_504, "->", 2); STRING_LITERAL(T839829468_505, ");$n", 4); STRING_LITERAL(T839829468_506, "[", 1); NIM_CONST TY205018 T839829468_507 = {((NimStringDesc*) &T839829468_499), ((NI) 472)} ; STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31); STRING_LITERAL(T839829468_509, "Result: ", 8); STRING_LITERAL(T839829468_510, "];$n", 4); STRING_LITERAL(T839829468_511, "]", 1); NIM_CONST TY205018 T839829468_512 = {((NimStringDesc*) &T839829468_265), ((NI) 925)} ; STRING_LITERAL(T839829468_513, "<stdio.h>", 9); STRING_LITERAL(T839829468_514, ", \"nil\"", 7); STRING_LITERAL(T839829468_515, ", $1? 
($1)->data:\"nil\"", 22); STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15); STRING_LITERAL(T839829468_517, "%s", 2); STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17); STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45); STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34); STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62); STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13); STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14); STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30); STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28); STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1" ")&7U));$n", 72); STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40); STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39); STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20); STRING_LITERAL(T839829468_530, "$1 |=((", 7); STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20); STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21); STRING_LITERAL(T839829468_533, "genObjConstr", 12); STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52); STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55); STRING_LITERAL(T839829468_536, "LOC$1.source", 12); STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38); STRING_LITERAL(T839829468_538, "LOC$#.dest", 10); STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46); STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45); STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12); STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51); STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) 
#raiseIndexError();$n", 50); STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51); STRING_LITERAL(T839829468_545, "genTupleElem", 12); STRING_LITERAL(T839829468_546, ".Field$1", 8); STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20); STRING_LITERAL(T839829468_548, "genDeref ", 9); STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17); STRING_LITERAL(T839829468_550, "genRecordField 3", 16); STRING_LITERAL(T839829468_551, ".$1", 3); STRING_LITERAL(T839829468_552, "} $1: ;$n", 9); STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13); STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13); STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19); STRING_LITERAL(T839829468_556, "goto $1;$n", 10); STRING_LITERAL(T839829468_557, "genIf()", 7); STRING_LITERAL(T839829468_558, "->Sup", 5); STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11); STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34); STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26); STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21); STRING_LITERAL(T839829468_563, "chckRangeF", 10); STRING_LITERAL(T839829468_564, "chckRange64", 11); STRING_LITERAL(T839829468_565, "chckRange", 9); STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11); STRING_LITERAL(T839829468_567, "closure to closure created", 26); STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31); STRING_LITERAL(T839829468_569, "while (1) {$n", 13); STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51); STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51); STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50); STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41); STRING_LITERAL(T839829468_574, "TMP$1", 5); STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23); STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9); 
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11); STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15); STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46); STRING_LITERAL(T839829468_580, "TMP$#:$n", 8); STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16); STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37); STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19); STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37); STRING_LITERAL(T839829468_585, "$2* $1;$n", 9); STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34); STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28); STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25); STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31); STRING_LITERAL(T839829468_590, "$#($#);$n", 9); STRING_LITERAL(T839829468_591, "$# = $#;$n", 10); STRING_LITERAL(T839829468_592, "genVarTuple", 11); STRING_LITERAL(T839829468_593, "genConstStmt", 12); STRING_LITERAL(T839829468_594, "for statement not eliminated", 28); STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34); STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33); STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21); STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12); STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9); STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36); STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24); STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14); STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15); STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23); STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18); STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25); STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45); STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17); 
STRING_LITERAL(T839829468_609, "no loop to break", 16); STRING_LITERAL(T839829468_610, "extern $1", 9); STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62); STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18); STRING_LITERAL(T839829468_613, "\"", 1); STRING_LITERAL(T839829468_614, "\\n\"\012", 4); STRING_LITERAL(T839829468_615, "Exception", 9); STRING_LITERAL(T839829468_616, "E_Base", 6); STRING_LITERAL(T839829468_617, "try {$n", 7); STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30); STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26); STRING_LITERAL(T839829468_620, "else ", 5); STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26); STRING_LITERAL(T839829468_622, "if ($1) ", 8); STRING_LITERAL(T839829468_623, "throw;$n", 8); STRING_LITERAL(T839829468_624, "<setjmp.h>", 10); STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17); STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22); STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12); STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33); STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12); STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39); STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12); STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34); STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23); STRING_LITERAL(T839829468_634, "else {$n", 8); STRING_LITERAL(T839829468_635, "else", 4); STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16); STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46); STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42); STRING_LITERAL(T839829468_639, "if ($1) {$n", 11); STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42); STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39); 
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22); STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15); STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14); STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18); STRING_LITERAL(T839829468_646, "bp", 2); STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57); STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47); STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58); STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21); NIM_CONST TY205018 T839829468_650 = {((NimStringDesc*) &T839829468_651), ((NI) 145)} ; STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12); STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26); STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24); STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31); STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39); STRING_LITERAL(T839829468_657, "); unknown node kind", 20); NIM_CONST TY205018 T839829468_658 = {((NimStringDesc*) &T839829468_651), ((NI) 1122)} ; STRING_LITERAL(T839829468_659, "Init000", 7); STRING_LITERAL(T839829468_660, "DatInit000", 10); STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41); STRING_LITERAL(T839829468_662, "\011$1();$N", 8); STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa" "in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N" "imMainInner;$N$2\011(*inner)();$N}$N$N", 162); STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N " " HINSTANCE hPrevInstance, $N LP" "STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program" "_result;$N}$N$N", 206); STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC" "L(void, NimMain)(void) {$N\011void (*volatile 
inner)();$N\011PreMain()" ";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175); STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N " " LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC" "ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175); STRING_LITERAL(T839829468_667, "<windows.h>", 11); STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59); STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim" "MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void" " (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011(" "*inner)();$N}$N$N", 208); STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48); STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;" "$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog" "ram_result;$N}$N$N", 145); STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21); STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19); STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26); STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40); STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa" "in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner" " = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168); STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30); STRING_LITERAL(T839829468_678, "still forwarded: ", 17); STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42); STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26); STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26); STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25); STRING_LITERAL(T839829468_683, "}$N$N", 5); 
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46); STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(T839829468_686, "0.15.0", 6); STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n" " $5 */$N", 201); extern NIM_CONST TY178082 Os_178068_4151366050; extern NIM_CONST TY178510 Cpu_178496_4151366050; STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22); STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36); STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20); STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15); STRING_LITERAL(T839829468_692, "#include $1$N", 13); STRING_LITERAL(T839829468_693, "extern \"C\"", 10); STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61); STRING_LITERAL(T839829468_695, "__$1__", 6); STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24); STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31); STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17); Tcgen531027* generatedheader_534201_839829468; extern TNimType NTI531015; /* BModule */ Ropeobj180006* indent_534655_839829468; extern TNimType NTI180004; /* Rope */ extern Gcheap49818 gch_49858_1689653243; Ropeobj180006* nimtv_540656_839829468; Ttypeseq294836* nimtvdeps_540674_839829468; extern TNimType NTI294836; /* TTypeSeq */ Intset270030 nimtvdeclared_540675_839829468; extern TNimType NTI270030; /* IntSet */ NI breakpointid_550860_839829468; Ropeobj180006* gbreakpoints_550861_839829468; extern TY531153* gmodules_531170_3723162438; extern TNimType NTI531027; /* TCGen */ extern Debuginfo205009 gdebuginfo_205470_1926258066; extern 
Toption171009Set goptions_171128_2607990831; extern TNimType NTI294804; /* TSymSeq */ extern Tglobaloption171013Set gglobaloptions_171130_2607990831; extern NimStringDesc* headerfile_171138_2607990831; extern NimStringDesc* gprojectfull_171211_2607990831; extern Tcommands171076 gcmd_171132_2607990831; extern NI gerrorcounter_194072_155036129; extern Ropeobj180006* rnl_180903_2381377266; extern NI gforwardedprocscounter_531171_3723162438; extern TNimType NTI294244; /* TTypeKind */ extern TNimType NTI205017; /* seq[(string, int)] */ extern Tsystemcc275002 ccompiler_275431_2528170400; extern NimStringDesc* tnl_178644_4151366050; extern NI floatsize_178642_4151366050; extern Tgcmode171080 gselectedgc_171133_2607990831; extern TNimType NTI294020; /* TNodeKind */ extern TNimType NTI136002; /* seq[string] */ extern TNimType NTI294435; /* TSymKind */ extern TNimType NTI294816; /* TLoc */ extern NI intsize_178641_4151366050; extern TNimType NTI294524; /* TMagic */ extern TNimType NTI193350; /* seq[Rope] */ extern TNimType NTI294796; /* TNodeSeq */ extern Ropeobj180006* mainmodprocs_531148_3723162438; extern Ropeobj180006* maindatinit_531151_3723162438; extern Ropeobj180006* mainmodinit_531149_3723162438; extern Ropeobj180006* othermodsinit_531150_3723162438; extern Tsystemos178004 targetos_178629_4151366050; extern TY193612* fileinfos_193629_155036129; extern Tsystemcpu178452 targetcpu_178627_4151366050; extern Ropeobj180006* gmapping_531152_3723162438; N_NIMCALL(void, T839829468_2)(void) { nimGCvisit((void*)generatedheader_534201_839829468, 0); } N_NIMCALL(void, T839829468_3)(void) { nimGCvisit((void*)indent_534655_839829468, 0); } static N_INLINE(Cell47304*, usrtocell_51440_1689653243)(void* usr0) { Cell47304* result0; result0 = (Cell47304*)0; result0 = ((Cell47304*) ((NI)((NU32)(((NI) (usr0))) - (NU32)(((NI)sizeof(Cell47304)))))); return result0; } static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47304* c0) { addzct_51417_1689653243((&gch_49858_1689653243.zct), c0); 
} static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) { { Cell47304* c0; if (!!((src0 == NIM_NIL))) goto LA3; c0 = usrtocell_51440_1689653243(src0); (*c0).refcount += ((NI) 8); } LA3: ; { Cell47304* c0; if (!!(((*dest0) == NIM_NIL))) goto LA7; c0 = usrtocell_51440_1689653243((*dest0)); { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA11; rtladdzct_52601_1689653243(c0); } LA11: ; } LA7: ; (*dest0) = src0; } N_NIMCALL(void, T839829468_5)(void) { nimGCvisit((void*)nimtv_540656_839829468, 0); } N_NIMCALL(void, T839829468_6)(void) { nimGCvisit((void*)nimtvdeps_540674_839829468, 0); } static N_INLINE(void, nimGCunrefNoCycle)(void* p0) { Cell47304* c0; c0 = usrtocell_51440_1689653243(p0); { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3; rtladdzct_52601_1689653243(c0); } LA3: ; } N_NIMCALL(void, T839829468_7)(void) { nimGCvisit((void*)nimtvdeclared_540675_839829468.head, 0); nimGCvisit((void*)nimtvdeclared_540675_839829468.data, 0); } N_NIMCALL(void, T839829468_8)(void) { nimGCvisit((void*)gbreakpoints_550861_839829468, 0); } N_NIMCALL(Tcgen531027*, getcgenmodule_534226_839829468)(Tsym294834* s0) { Tcgen531027* result0; result0 = (Tcgen531027*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (((NI) 0) <= (*s0).position); if (!(LOC3)) goto LA4; LOC3 = ((*s0).position < (gmodules_531170_3723162438 ? 
gmodules_531170_3723162438->Sup.len : 0)); LA4: ; if (!LOC3) goto LA5; result0 = gmodules_531170_3723162438->data[(*s0).position]; } goto LA1; LA5: ; { result0 = NIM_NIL; } LA1: ; return result0; } static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) { void* LOC1; LOC1 = (void*)0; LOC1 = memcpy(dest0, source0, ((size_t) (size0))); } static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) { copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1))))); (*dest0).Sup.len += (*src0).Sup.len; } N_NIMCALL(NU32, hashowner_534977_839829468)(Tsym294834* s0) { NU32 result0; Tsym294834* m0; Tsym294834* p0; result0 = (NU32)0; m0 = s0; { while (1) { if (!!(((*m0).kind == ((Tsymkind294435) 6)))) goto LA2; m0 = (*m0).owner; } LA2: ; } p0 = (*m0).owner; result0 = register_205121_1926258066((&gdebuginfo_205470_1926258066), (*(*p0).name).s, (*(*m0).name).s); return result0; } static N_INLINE(void, incref_53419_1689653243)(Cell47304* c0) { (*c0).refcount = (NI)((NU32)((*c0).refcount) + (NU32)(((NI) 8))); } static N_INLINE(void, decref_53001_1689653243)(Cell47304* c0) { { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3; rtladdzct_52601_1689653243(c0); } LA3: ; } static N_INLINE(void, asgnRef)(void** dest0, void* src0) { { Cell47304* LOC5; if (!!((src0 == NIM_NIL))) goto LA3; LOC5 = (Cell47304*)0; LOC5 = usrtocell_51440_1689653243(src0); incref_53419_1689653243(LOC5); } LA3: ; { Cell47304* LOC10; if (!!(((*dest0) == NIM_NIL))) goto LA8; LOC10 = (Cell47304*)0; LOC10 = usrtocell_51440_1689653243((*dest0)); decref_53001_1689653243(LOC10); } LA8: ; (*dest0) = src0; } N_NIMCALL(Toption171009Set, initprocoptions_564635_839829468)(Tcgen531027* m0) { Toption171009Set result0; memset((void*)(&result0), 0, sizeof(result0)); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 13))&31U)))!=0)) goto LA3; result0 = 
(goptions_171128_2607990831 & ~ 32768); } goto LA1; LA3: ; { result0 = goptions_171128_2607990831; } LA1: ; return result0; } N_NIMCALL(Tcproc531021*, newpreinitproc_564625_839829468)(Tcgen531027* m0) { Tcproc531021* result0; result0 = (Tcproc531021*)0; result0 = newproc_531206_3723162438(NIM_NIL, m0); (*result0).labels = ((NI) 100000); return result0; } N_NIMCALL(Tcproc531021*, newpostinitproc_564630_839829468)(Tcgen531027* m0) { Tcproc531021* result0; result0 = (Tcproc531021*)0; result0 = newproc_531206_3723162438(NIM_NIL, m0); (*result0).labels = ((NI) 200000); return result0; } N_NIMCALL(Ropeobj180006*, gettempname_535596_839829468)(Tcgen531027* m0) { Ropeobj180006* result0; Ropeobj180006* LOC1; result0 = (Ropeobj180006*)0; LOC1 = (Ropeobj180006*)0; LOC1 = rope_180401_2381377266(((NI64) ((*m0).labels))); result0 = HEX26_180418_2381377266((*m0).tmpbase, LOC1); (*m0).labels += ((NI) 1); return result0; } N_NIMCALL(Tcgen531027*, rawnewmodule_564663_839829468)(Tsym294834* module0, NimStringDesc* filename0) { Tcgen531027* result0; NimStringDesc* LOC1; NU32 LOC2; NimStringDesc* LOC3; NimStringDesc* LOC4; NimStringDesc* LOC5; result0 = (Tcgen531027*)0; result0 = (Tcgen531027*) newObj((&NTI531015), sizeof(Tcgen531027)); (*result0).Sup.Sup.m_type = (&NTI531027); LOC1 = (NimStringDesc*)0; LOC2 = (NU32)0; LOC2 = hashowner_534977_839829468(module0); LOC3 = (NimStringDesc*)0; LOC3 = HEX24_8401_1689653243(((NU64) (LOC2))); LOC1 = rawNewString(LOC3->Sup.len + 2); appendString(LOC1, ((NimStringDesc*) &T839829468_11)); appendString(LOC1, LOC3); appendString(LOC1, ((NimStringDesc*) &T839829468_12)); asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_180277_2381377266(LOC1)); initlinkedlist_148031_3771138726((&(*result0).headerfiles)); initintset_270885_2627731572((&(*result0).declaredthings)); initintset_270885_2627731572((&(*result0).declaredprotos)); LOC4 = (NimStringDesc*)0; LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0); if (LOC4) 
nimGCunrefNoCycle(LOC4); LOC5 = (NimStringDesc*)0; LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0); if (LOC5) nimGCunrefNoCycle(LOC5); initidtable_298019_850551059((&(*result0).typecache)); initidtable_298019_850551059((&(*result0).forwtypecache)); asgnRefNoCycle((void**) (&(*result0).module), module0); initintset_270885_2627731572((&(*result0).typeinfomarker)); asgnRef((void**) (&(*result0).initproc), newproc_531206_3723162438(NIM_NIL, result0)); (*(*result0).initproc).options = initprocoptions_564635_839829468(result0); asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_564625_839829468(result0)); asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_564630_839829468(result0)); initnodetable_298085_850551059((&(*result0).datacache)); if ((*result0).typestack) nimGCunrefNoCycle((*result0).typestack); (*result0).typestack = (Ttypeseq294836*) newSeqRC1((&NTI294836), 0); if ((*result0).forwardedprocs) nimGCunrefNoCycle((*result0).forwardedprocs); (*result0).forwardedprocs = (Tsymseq294804*) newSeqRC1((&NTI294804), 0); asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_535596_839829468(result0)); asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_535596_839829468(result0)); { if (!(((*module0).flags &(1U<<((NU)(((Tsymflag294184) 13))&31U)))!=0)) goto LA8; (*result0).flags |= ((NU8)1)<<((((Codegenflag531025) 0))%(sizeof(NU8)*8)); (*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption171009) 15)) % (sizeof(NU32)*8))); (*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption171009) 15)) % (sizeof(NU32)*8))); } LA8: ; return result0; } N_NIMCALL(Tcgen531027*, rawnewmodule_565038_839829468)(Tsym294834* module0) { Tcgen531027* result0; NimStringDesc* LOC1; result0 = (Tcgen531027*)0; LOC1 = (NimStringDesc*)0; LOC1 = tofullpath_194264_155036129(((NI32) ((*module0).position))); result0 = rawnewmodule_564663_839829468(module0, LOC1); return result0; } N_NIMCALL(Tcgen531027*, 
newmodule_565045_839829468)(Tsym294834* module0) { Tcgen531027* result0; result0 = (Tcgen531027*)0; { Tcgen531027* LOC3; NimStringDesc* LOC6; LOC3 = (Tcgen531027*)0; LOC3 = getcgenmodule_534226_839829468(module0); if (!!((LOC3 == NIM_NIL))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = HEX24_198185_1689653243(T839829468_9); internalerror_198113_155036129(LOC6); } LA4: ; result0 = rawnewmodule_565038_839829468(module0); { if (!((gmodules_531170_3723162438 ? gmodules_531170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9; gmodules_531170_3723162438 = (TY531153*) setLengthSeq(&(gmodules_531170_3723162438)->Sup, sizeof(Tcgen531027*), ((NI) ((NI)((*module0).position + ((NI) 1))))); } LA9: ; asgnRef((void**) (&gmodules_531170_3723162438->data[(*module0).position]), result0); { if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 2))&63U)))!=0)) goto LA13; { NimStringDesc* LOC19; NimStringDesc* LOC20; if (!(((*module0).flags &(1U<<((NU)(((Tsymflag294184) 25))&31U)))!=0)) goto LA17; LOC19 = (NimStringDesc*)0; LOC20 = (NimStringDesc*)0; LOC20 = tofilename_194260_155036129(((NI32) ((*module0).position))); LOC19 = rawNewString(LOC20->Sup.len + 28); appendString(LOC19, ((NimStringDesc*) &T839829468_13)); appendString(LOC19, LOC20); internalerror_198113_155036129(LOC19); } LA17: ; } LA13: ; return result0; } N_NIMCALL(Tpasscontext343002*, myopen_565115_839829468)(Tsym294834* module0) { Tpasscontext343002* result0; Tcgen531027* LOC1; result0 = (Tpasscontext343002*)0; LOC1 = (Tcgen531027*)0; LOC1 = newmodule_565045_839829468(module0); result0 = &LOC1->Sup; { NIM_BOOL LOC4; NimStringDesc* f0; NimStringDesc* LOC13; NimStringDesc* LOC14; LOC4 = (NIM_BOOL)0; LOC4 = ((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 27))&63U)))!=0); if (!(LOC4)) goto LA5; LOC4 = (generatedheader_534201_839829468 == NIM_NIL); LA5: ; if (!LOC4) goto LA6; { if (!(((NI) 0) < (headerfile_171138_2607990831 ? 
headerfile_171138_2607990831->Sup.len : 0))) goto LA10; f0 = headerfile_171138_2607990831; } goto LA8; LA10: ; { f0 = gprojectfull_171211_2607990831; } LA8: ; LOC13 = (NimStringDesc*)0; LOC13 = completecfilepath_275854_2528170400(f0, NIM_TRUE); LOC14 = (NimStringDesc*)0; LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14)); asgnRef((void**) (&generatedheader_534201_839829468), rawnewmodule_564663_839829468(module0, LOC14)); (*generatedheader_534201_839829468).flags |= ((NU8)1)<<((((Codegenflag531025) 3))%(sizeof(NU8)*8)); } LA6: ; return result0; } N_NIMCALL(NimStringDesc*, getcfile_565204_839829468)(Tcgen531027* m0) { NimStringDesc* result0; NimStringDesc* ext0; NimStringDesc* LOC13; NimStringDesc* LOC14; result0 = (NimStringDesc*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; ext0 = copyString(((NimStringDesc*) &T839829468_15)); } goto LA1; LA5: ; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = (gcmd_171132_2607990831 == ((Tcommands171076) 3)); if (LOC8) goto LA9; LOC8 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 28))&31U)))!=0); LA9: ; if (!LOC8) goto LA10; ext0 = copyString(((NimStringDesc*) &T839829468_16)); } goto LA1; LA10: ; { ext0 = copyString(((NimStringDesc*) &T839829468_17)); } LA1: ; LOC13 = (NimStringDesc*)0; LOC13 = withpackagename_172073_2607990831((*m0).cfilename); LOC14 = (NimStringDesc*)0; LOC14 = completecfilepath_275854_2528170400(LOC13, NIM_TRUE); result0 = noschangeFileExt(LOC14, ext0); return result0; } N_NIMCALL(Tpasscontext343002*, myopencached_565249_839829468)(Tsym294834* module0, Trodreader334021* rd0) { Tpasscontext343002* result0; Tcgen531027* m0; NimStringDesc* LOC1; result0 = (Tpasscontext343002*)0; m0 = newmodule_565045_839829468(module0); LOC1 = (NimStringDesc*)0; LOC1 = getcfile_565204_839829468(m0); 
readmergeinfo_532613_2760143328(LOC1, m0); result0 = &m0->Sup; return result0; } static N_INLINE(NIM_BOOL, skipcodegen_343085_2355241294)(Tnode294802* n0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = (((NI) 0) < gerrorcounter_194072_155036129); return result0; } N_NIMCALL(void, fillloc_534282_839829468)(Tloc294816* a0, Tlockind294808 k0, Ttype294840* typ0, Ropeobj180006* r0, Tstorageloc294812 s0) { { if (!((*a0).k == ((Tlockind294808) 0))) goto LA3; (*a0).k = k0; unsureAsgnRef((void**) (&(*a0).t), typ0); (*a0).s = s0; { if (!((*a0).r == NIM_NIL)) goto LA7; unsureAsgnRef((void**) (&(*a0).r), r0); } LA7: ; } LA3: ; } N_NIMCALL(NIM_BOOL, iskeyword_534960_839829468)(Tident201010* w0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; switch ((*w0).Sup.id) { case ((NI) 200) ... ((NI) 262): case ((NI) 4) ... ((NI) 70): case ((NI) 138): { result0 = NIM_TRUE; goto BeforeRet; } break; default: { result0 = NIM_FALSE; goto BeforeRet; } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj180006*, manglename_535205_839829468)(Tsym294834* s0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = (*s0).loc.r; { NIM_BOOL keeporigname0; NIM_BOOL LOC5; NIM_BOOL LOC6; NIM_BOOL LOC9; NimStringDesc* LOC10; if (!(result0 == NIM_NIL)) goto LA3; LOC5 = (NIM_BOOL)0; LOC6 = (NIM_BOOL)0; LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0); if (!(LOC6)) goto LA7; LOC6 = ((IL64(2149580812) & (*s0).flags) == 0); LA7: ; LOC5 = LOC6; if (!(LOC5)) goto LA8; LOC9 = (NIM_BOOL)0; LOC9 = iskeyword_534960_839829468((*s0).name); LOC5 = !(LOC9); LA8: ; keeporigname0 = LOC5; LOC10 = (NimStringDesc*)0; LOC10 = mangle_530847_2036603609((*(*s0).name).s); result0 = rope_180277_2381377266(LOC10); { if (!keeporigname0) goto LA13; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_18)); } goto LA11; LA13: ; { TY535289 LOC16; Ropeobj180006* LOC17; Ropeobj180006* LOC18; TY535289 LOC19; Ropeobj180006* LOC20; NU32 LOC21; Ropeobj180006* LOC22; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = 
(Ropeobj180006*)0; LOC17 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0); add_180482_2381377266(&result0, LOC17); LOC18 = (Ropeobj180006*)0; LOC18 = rope_180401_2381377266(((NI64) ((*s0).Sup.id))); add_180482_2381377266(&result0, LOC18); memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (Ropeobj180006*)0; LOC20 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0); add_180482_2381377266(&result0, LOC20); LOC21 = (NU32)0; LOC21 = hashowner_534977_839829468(s0); LOC22 = (Ropeobj180006*)0; LOC22 = rope_180401_2381377266(((NI64) (LOC21))); add_180482_2381377266(&result0, LOC22); } LA11: ; asgnRefNoCycle((void**) (&(*s0).loc.r), result0); } LA3: ; return result0; } N_NIMCALL(void, fillprocloc_541201_839829468)(Tsym294834* sym0) { { Ropeobj180006* LOC5; if (!((*sym0).loc.k == ((Tlockind294808) 0))) goto LA3; LOC5 = (Ropeobj180006*)0; LOC5 = manglename_535205_839829468(sym0); fillloc_534282_839829468((&(*sym0).loc), ((Tlockind294808) 7), (*sym0).typ, LOC5, ((Tstorageloc294812) 2)); } LA3: ; } N_NIMCALL(void, useheader_534369_839829468)(Tcgen531027* m0, Tsym294834* sym0) { { NimStringDesc* LOC5; NIM_BOOL LOC6; if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 6))&15U)))!=0)) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = getstr_299230_850551059((*(*sym0).annex).path); LOC6 = (NIM_BOOL)0; LOC6 = includestr_148249_3771138726((&(*m0).headerfiles), LOC5); } LA3: ; } static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) { (*dest0).data[((*dest0).Sup.len)- 0] = c0; (*dest0).data[((NI)((*dest0).Sup.len + ((NI) 1)))- 0] = 0; (*dest0).Sup.len += ((NI) 1); } N_NIMCALL(NIM_BOOL, isactivated_563431_839829468)(Tsym294834* prc0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = !(((*prc0).typ == NIM_NIL)); return result0; } N_NIMCALL(void, addforwardedproc_534203_839829468)(Tcgen531027* m0, Tsym294834* prc0) { (*m0).forwardedprocs = (Tsymseq294804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym294834*)); 
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0); ++(*m0).forwardedprocs->Sup.len; gforwardedprocscounter_531171_3723162438 += ((NI) 1); } N_NIMCALL(void, genclinedir_534725_839829468)(Ropeobj180006** r0, NimStringDesc* filename0, NI line0) { { TY534811 LOC5; NimStringDesc* LOC6; if (!((goptions_171128_2607990831 &(1U<<((NU)(((Toption171009) 10))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (NimStringDesc*)0; LOC6 = makesinglelinecstring_530835_2036603609(filename0); LOC5[0] = rope_180277_2381377266(LOC6); LOC5[1] = rope_180401_2381377266(((NI64) (line0))); addf_181205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2); } LA3: ; } static N_INLINE(NI, tolinenumber_194415_155036129)(Tlineinfo193336 info0) { NI result0; result0 = (NI)0; result0 = ((NI) (info0.line)); return result0; } N_NIMCALL(NI, safelinenm_534721_839829468)(Tlineinfo193336 info0) { NI result0; result0 = (NI)0; result0 = tolinenumber_194415_155036129(info0); { if (!(result0 < ((NI) 0))) goto LA3; result0 = ((NI) 0); } LA3: ; return result0; } N_NIMCALL(void, genclinedir_534813_839829468)(Ropeobj180006** r0, Tlineinfo193336 info0) { NimStringDesc* LOC1; NI LOC2; LOC1 = (NimStringDesc*)0; LOC1 = tofullpath_194264_155036129(info0.fileindex); LOC2 = (NI)0; LOC2 = safelinenm_534721_839829468(info0); genclinedir_534725_839829468(r0, LOC1, LOC2); } N_NIMCALL(Tctypekind531007, mapsettype_535389_839829468)(Ttype294840* typ0) { Tctypekind531007 result0; NI64 LOC1; result0 = (Tctypekind531007)0; LOC1 = (NI64)0; LOC1 = getsize_322135_3876443242(typ0); switch (((NI) (LOC1))) { case ((NI) 1): { result0 = ((Tctypekind531007) 4); } break; case ((NI) 2): { result0 = ((Tctypekind531007) 5); } break; case ((NI) 4): { result0 = ((Tctypekind531007) 6); } break; case ((NI) 8): { result0 = ((Tctypekind531007) 7); } break; default: { result0 = ((Tctypekind531007) 17); } break; } return result0; } N_NIMCALL(Tctypekind531007, 
maptype_535393_839829468)(Ttype294840* typ0) { Tctypekind531007 result0; result0 = (Tctypekind531007)0; switch ((*typ0).kind) { case ((Ttypekind294244) 0): case ((Ttypekind294244) 7): { result0 = ((Tctypekind531007) 0); } break; case ((Ttypekind294244) 1): { result0 = ((Tctypekind531007) 2); } break; case ((Ttypekind294244) 2): { result0 = ((Tctypekind531007) 1); } break; case ((Ttypekind294244) 19): { result0 = mapsettype_535389_839829468(typ0); } break; case ((Ttypekind294244) 27): case ((Ttypekind294244) 4): case ((Ttypekind294244) 16): case ((Ttypekind294244) 48): { result0 = ((Tctypekind531007) 17); } break; case ((Ttypekind294244) 17): case ((Ttypekind294244) 18): { result0 = ((Tctypekind531007) 19); } break; case ((Ttypekind294244) 10): case ((Ttypekind294244) 11): case ((Ttypekind294244) 12): case ((Ttypekind294244) 13): case ((Ttypekind294244) 15): case ((Ttypekind294244) 46): case ((Ttypekind294244) 47): case ((Ttypekind294244) 49): case ((Ttypekind294244) 8): { Ttype294840* LOC8; LOC8 = (Ttype294840*)0; LOC8 = lastson_297377_850551059(typ0); result0 = maptype_535393_839829468(LOC8); } break; case ((Ttypekind294244) 14): { { NI64 LOC12; LOC12 = (NI64)0; LOC12 = firstord_322001_3876443242(typ0); if (!(LOC12 < IL64(0))) goto LA13; result0 = ((Tctypekind531007) 6); } goto LA10; LA13: ; { NI64 LOC16; LOC16 = (NI64)0; LOC16 = getsize_322135_3876443242(typ0); switch (((NI) (LOC16))) { case ((NI) 1): { result0 = ((Tctypekind531007) 13); } break; case ((NI) 2): { result0 = ((Tctypekind531007) 14); } break; case ((NI) 4): { result0 = ((Tctypekind531007) 6); } break; case ((NI) 8): { result0 = ((Tctypekind531007) 7); } break; default: { internalerror_198113_155036129(((NimStringDesc*) &T839829468_25)); } break; } } LA10: ; } break; case ((Ttypekind294244) 20): { result0 = maptype_535393_839829468((*typ0).sons->data[((NI) 0)]); } break; case ((Ttypekind294244) 21): case ((Ttypekind294244) 23): case ((Ttypekind294244) 22): { Ttype294840* base0; Ttype294840* LOC24; 
LOC24 = (Ttype294840*)0; LOC24 = lastson_297377_850551059(typ0); base0 = skiptypes_298099_850551059(LOC24, IL64(211106232576256)); switch ((*base0).kind) { case ((Ttypekind294244) 27): case ((Ttypekind294244) 4): case ((Ttypekind294244) 16): case ((Ttypekind294244) 48): { result0 = ((Tctypekind531007) 18); } break; default: { result0 = ((Tctypekind531007) 20); } break; } } break; case ((Ttypekind294244) 26): { result0 = ((Tctypekind531007) 20); } break; case ((Ttypekind294244) 24): { result0 = ((Tctypekind531007) 22); } break; case ((Ttypekind294244) 25): { { if (!!(((*typ0).callconv == ((Tcallingconvention294002) 8)))) goto LA32; result0 = ((Tctypekind531007) 23); } goto LA30; LA32: ; { result0 = ((Tctypekind531007) 19); } LA30: ; } break; case ((Ttypekind294244) 28): { result0 = ((Tctypekind531007) 21); } break; case ((Ttypekind294244) 29): { result0 = ((Tctypekind531007) 24); } break; case ((Ttypekind294244) 31) ... ((Ttypekind294244) 44): { result0 = ((Tctypekind531007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3)))); } break; case ((Ttypekind294244) 59): { { Ttype294840* LOC43; if (!!(((*typ0).n == NIM_NIL))) goto LA41; LOC43 = (Ttype294840*)0; LOC43 = lastson_297377_850551059(typ0); result0 = maptype_535393_839829468(LOC43); } goto LA39; LA41: ; { internalerror_198113_155036129(((NimStringDesc*) &T839829468_25)); } LA39: ; } break; default: { internalerror_198113_155036129(((NimStringDesc*) &T839829468_25)); } break; } return result0; } N_NIMCALL(NIM_BOOL, isimportedcpptype_535476_839829468)(Ttype294840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).sym == NIM_NIL)); if (!(LOC1)) goto LA2; LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NIM_BOOL, needscomplexassignment_535509_839829468)(Ttype294840* typ0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = containsgarbagecollectedref_322117_3876443242(typ0); 
return result0; } static N_INLINE(NIM_BOOL, isobjlackingtypefield_535513_839829468)(Ttype294840* typ0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC3; NIM_BOOL LOC4; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((*typ0).kind == ((Ttypekind294244) 17)); if (!(LOC1)) goto LA2; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 2))&31U)))!=0); if (!(LOC4)) goto LA5; LOC4 = ((*typ0).sons->data[((NI) 0)] == NIM_NIL); LA5: ; LOC3 = LOC4; if (LOC3) goto LA6; LOC3 = ispureobject_322138_3876443242(typ0); LA6: ; LOC1 = LOC3; LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NIM_BOOL, isinvalidreturntype_535548_839829468)(Ttype294840* rettype0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!(rettype0 == NIM_NIL)) goto LA3; result0 = NIM_TRUE; } goto LA1; LA3: ; { Tctypekind531007 LOC6; LOC6 = (Tctypekind531007)0; LOC6 = maptype_535393_839829468(rettype0); switch (LOC6) { case ((Tctypekind531007) 17): { Ttype294840* LOC8; LOC8 = (Ttype294840*)0; LOC8 = skiptypes_298099_850551059(rettype0, IL64(211106232576256)); result0 = !(((*LOC8).kind == ((Ttypekind294244) 23) || (*LOC8).kind == ((Ttypekind294244) 22) || (*LOC8).kind == ((Ttypekind294244) 21))); } break; case ((Tctypekind531007) 19): { Ttype294840* t0; NIM_BOOL LOC16; NIM_BOOL LOC18; NIM_BOOL LOC20; t0 = skiptypes_298099_850551059(rettype0, IL64(211106232576256)); { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = isimportedcpptype_535476_839829468(rettype0); if (LOC12) goto LA13; LOC12 = isimportedcpptype_535476_839829468(t0); LA13: ; if (!LOC12) goto LA14; result0 = NIM_FALSE; goto BeforeRet; } LA14: ; LOC16 = (NIM_BOOL)0; LOC16 = needscomplexassignment_535509_839829468(t0); if (LOC16) goto LA17; LOC18 = (NIM_BOOL)0; LOC18 = ((*t0).kind == ((Ttypekind294244) 17)); if (!(LOC18)) goto LA19; LOC20 = (NIM_BOOL)0; LOC20 = isobjlackingtypefield_535513_839829468(t0); LOC18 = !(LOC20); LA19: ; LOC16 = LOC18; LA17: ; result0 = LOC16; } break; default: { result0 = NIM_FALSE; } 
break; } } LA1: ; }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj180006*, typename_535292_839829468)(Ttype294840* typ0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { NimStringDesc* LOC5; if (!!(((*typ0).sym == NIM_NIL))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = mangle_530847_2036603609((*(*(*typ0).sym).name).s); result0 = rope_180277_2381377266(LOC5); } goto LA1; LA3: ; { TY535289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_28), LOC7, 0); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, gettypename_535313_839829468)(Ttype294840* typ0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*typ0).sym == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = !(((96 & (*(*typ0).sym).flags) == 0)); LA4: ; if (!LOC3) goto LA5; result0 = (*(*typ0).sym).loc.r; } goto LA1; LA5: ; { { Ropeobj180006* LOC12; Ropeobj180006* LOC13; if (!((*typ0).loc.r == NIM_NIL)) goto LA10; LOC12 = (Ropeobj180006*)0; LOC12 = typename_535292_839829468(typ0); LOC13 = (Ropeobj180006*)0; LOC13 = rope_180401_2381377266(((NI64) ((*typ0).Sup.id))); asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_180418_2381377266(LOC12, LOC13)); } LA10: ; result0 = (*typ0).loc.r; } LA1: ; { NimStringDesc* LOC18; if (!(result0 == NIM_NIL)) goto LA16; LOC18 = (NimStringDesc*)0; LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI294244))->Sup.len + 13); appendString(LOC18, ((NimStringDesc*) &T839829468_29)); appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI294244))); internalerror_198113_155036129(LOC18); } LA16: ; return result0; } N_NIMCALL(Ropeobj180006*, typenameorliteral_535898_839829468)(Ttype294840* t0, NimStringDesc* literal0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = !(((*t0).sym == NIM_NIL)); if (!(LOC4)) goto LA5; LOC4 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0); LA5: ; LOC3 
= LOC4; if (!(LOC3)) goto LA6; LOC3 = ((*(*t0).sym).magic == ((Tmagic294524) 0)); LA6: ; if (!LOC3) goto LA7; result0 = gettypename_535313_839829468(t0); } goto LA1; LA7: ; { result0 = rope_180277_2381377266(literal0); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, getsimpletypedesc_535936_839829468)(Tcgen531027* m0, Ttype294840* typ0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; switch ((*typ0).kind) { case ((Ttypekind294244) 26): { result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_30)); } break; case ((Ttypekind294244) 28): { Ropeobj180006* LOC3; LOC3 = (Ropeobj180006*)0; LOC3 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_31)); result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_32)); } break; case ((Ttypekind294244) 29): { result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_33)); } break; case ((Ttypekind294244) 1): { result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_34)); } break; case ((Ttypekind294244) 2): { result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_35)); } break; case ((Ttypekind294244) 5): { result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_18)); } break; case ((Ttypekind294244) 31) ... 
((Ttypekind294244) 44): { result0 = typenameorliteral_535898_839829468(typ0, Numericaltypetostr_535941_839829468[((*typ0).kind)- 31]); } break; case ((Ttypekind294244) 13): case ((Ttypekind294244) 20): case ((Ttypekind294244) 15): { result0 = getsimpletypedesc_535936_839829468(m0, (*typ0).sons->data[((NI) 0)]); } break; case ((Ttypekind294244) 59): { { Ttype294840* LOC15; if (!!(((*typ0).n == NIM_NIL))) goto LA13; LOC15 = (Ttype294840*)0; LOC15 = lastson_297377_850551059(typ0); result0 = getsimpletypedesc_535936_839829468(m0, LOC15); } goto LA11; LA13: ; { internalerror_198113_155036129(((NimStringDesc*) &T839829468_50)); } LA11: ; } break; case ((Ttypekind294244) 11): { Ttype294840* LOC18; LOC18 = (Ttype294840*)0; LOC18 = lastson_297377_850551059(typ0); result0 = getsimpletypedesc_535936_839829468(m0, LOC18); } break; default: { result0 = NIM_NIL; } break; } return result0; } N_NIMCALL(Ropeobj180006*, cachegettype_535591_839829468)(Tidtable294850 tab0, Ttype294840* key0) { Ropeobj180006* result0; Tidobj201004* LOC1; TNimObject* LOC2; result0 = (Ropeobj180006*)0; LOC1 = (Tidobj201004*)0; LOC1 = &key0->Sup; LOC2 = (TNimObject*)0; LOC2 = idtableget_301086_2984716966(tab0, LOC1); result0 = ((Ropeobj180006*) (LOC2)); return result0; } N_NIMCALL(Ropeobj180006*, gettypepre_535972_839829468)(Tcgen531027* m0, Ttype294840* typ0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { if (!(typ0 == NIM_NIL)) goto LA3; result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_26)); } goto LA1; LA3: ; { result0 = getsimpletypedesc_535936_839829468(m0, typ0); { if (!(result0 == NIM_NIL)) goto LA8; result0 = cachegettype_535591_839829468((*m0).typecache, typ0); } LA8: ; } LA1: ; return result0; } N_NIMCALL(NIM_BOOL, isimportedtype_535449_839829468)(Ttype294840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).sym == NIM_NIL)); if (!(LOC1)) goto LA2; LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0); 
LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NimStringDesc*, getforwardstructformat_536015_839829468)(Tcgen531027* m0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; result0 = copyString(((NimStringDesc*) &T839829468_54)); } goto LA1; LA5: ; { result0 = copyString(((NimStringDesc*) &T839829468_55)); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, structorunion_536001_839829468)(Ttype294840* t0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag294431) 1))&31U)))!=0)) goto LA3; result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_56)); } goto LA1; LA3: ; { result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_57)); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, gettypeforward_536039_839829468)(Tcgen531027* m0, Ttype294840* typ0) { Ropeobj180006* result0; { result0 = (Ropeobj180006*)0; result0 = cachegettype_535591_839829468((*m0).forwtypecache, typ0); { if (!!((result0 == NIM_NIL))) goto LA3; goto BeforeRet; } LA3: ; result0 = gettypepre_535972_839829468(m0, typ0); { if (!!((result0 == NIM_NIL))) goto LA7; goto BeforeRet; } LA7: ; switch ((*typ0).kind) { case ((Ttypekind294244) 24): case ((Ttypekind294244) 18): case ((Ttypekind294244) 17): { Tidobj201004* LOC17; TNimObject* LOC18; result0 = gettypename_535313_839829468(typ0); { NIM_BOOL LOC12; NimStringDesc* LOC15; TY534811 LOC16; LOC12 = (NIM_BOOL)0; LOC12 = isimportedtype_535449_839829468(typ0); if (!!(LOC12)) goto LA13; LOC15 = (NimStringDesc*)0; LOC15 = getforwardstructformat_536015_839829468(m0); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = structorunion_536001_839829468(typ0); LOC16[1] = result0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 2))- 0], LOC15, LOC16, 2); } LA13: ; LOC17 = 
(Tidobj201004*)0; LOC17 = &typ0->Sup; LOC18 = (TNimObject*)0; LOC18 = &result0->Sup; idtableput_301094_2984716966((&(*m0).forwtypecache), LOC17, LOC18); } break; default: { NimStringDesc* LOC20; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI294244))->Sup.len + 16); appendString(LOC20, ((NimStringDesc*) &T839829468_58)); appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI294244))); appendChar(LOC20, 41); internalerror_198113_155036129(LOC20); } break; } }BeforeRet: ; return result0; } N_NIMCALL(void, pushtype_535958_839829468)(Tcgen531027* m0, Ttype294840* typ0) { (*m0).typestack = (Ttypeseq294836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype294840*)); asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0); ++(*m0).typestack->Sup.len; } N_NIMCALL(Ropeobj180006*, gettypedescweak_536079_839829468)(Tcgen531027* m0, Ttype294840* t0, Intset270030* check0) { Ropeobj180006* result0; Ttype294840* etb0; result0 = (Ropeobj180006*)0; etb0 = skiptypes_298099_850551059(t0, IL64(211106232576256)); switch ((*etb0).kind) { case ((Ttypekind294244) 17): case ((Ttypekind294244) 18): { { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = isimportedcpptype_535476_839829468(etb0); if (!(LOC4)) goto LA5; LOC4 = ((*t0).kind == ((Ttypekind294244) 11)); LA5: ; if (!LOC4) goto LA6; result0 = gettypedescaux_535503_839829468(m0, t0, check0); } goto LA2; LA6: ; { Ttype294840* x0; x0 = getuniquetype_530640_2036603609(etb0); result0 = gettypeforward_536039_839829468(m0, x0); pushtype_535958_839829468(m0, x0); } LA2: ; } break; case ((Ttypekind294244) 24): { Ttype294840* x0; Ropeobj180006* LOC10; x0 = getuniquetype_530640_2036603609(etb0); LOC10 = (Ropeobj180006*)0; LOC10 = gettypeforward_536039_839829468(m0, x0); result0 = HEX26_180447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53)); pushtype_535958_839829468(m0, x0); } break; default: { result0 = gettypedescaux_535503_839829468(m0, t0, check0); } break; } return result0; } static 
N_INLINE(NI, len_295081_850551059)(Tnode294802* n0) { NI result0; result0 = (NI)0; { if (!(*n0).kindU.S6.sons == 0) goto LA3; result0 = ((NI) 0); } goto LA1; LA3: ; { result0 = ((*n0).kindU.S6.sons ? (*n0).kindU.S6.sons->Sup.len : 0); } LA1: ; return result0; } N_NIMCALL(void, appcg_534632_839829468)(Tcgen531027* m0, Ropeobj180006** c0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) { Ropeobj180006* LOC1; LOC1 = (Ropeobj180006*)0; LOC1 = ropecg_534407_839829468(m0, frmt0, args0, args0Len0); add_180482_2381377266(c0, LOC1); } N_NIMCALL(NIM_BOOL, scancppgenericslot_536827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) { NIM_BOOL result0; NI begin0; { result0 = (NIM_BOOL)0; (*cursor0) += ((NI) 1); begin0 = (*cursor0); { while (1) { if (!((NU8)(pat0->data[(*cursor0)]) == (NU8)(42))) goto LA2; (*cursor0) += ((NI) 1); } LA2: ; } { if (!(((NU8)(pat0->data[(*cursor0)])) >= ((NU8)(48)) && ((NU8)(pat0->data[(*cursor0)])) <= ((NU8)(57)))) goto LA5; (*outidx0) = ((NI) ((NI)(((NI) (((NU8)(pat0->data[(*cursor0)])))) - ((NI) 48)))); (*outstars0) = (NI)((*cursor0) - begin0); (*cursor0) += ((NI) 1); result0 = NIM_TRUE; goto BeforeRet; } goto LA3; LA5: ; { result0 = NIM_FALSE; goto BeforeRet; } LA3: ; }BeforeRet: ; return result0; } N_NIMCALL(Ttype294840*, resolvestarsincpptype_536891_839829468)(Ttype294840* typ0, NI idx0, NI stars0) { Ttype294840* result0; result0 = (Ttype294840*)0; { NI LOC3; LOC3 = (NI)0; LOC3 = len_297339_850551059(typ0); if (!(LOC3 <= idx0)) goto LA4; internalerror_198113_155036129(((NimStringDesc*) &T839829468_81)); } LA4: ; result0 = (*typ0).sons->data[idx0]; { NI i_536906_839829468; NI res_536931_839829468; i_536906_839829468 = (NI)0; res_536931_839829468 = ((NI) 1); { while (1) { if (!(res_536931_839829468 <= stars0)) goto LA8; i_536906_839829468 = res_536931_839829468; { NIM_BOOL LOC11; NI LOC13; LOC11 = (NIM_BOOL)0; LOC11 = !((result0 == NIM_NIL)); if (!(LOC11)) goto LA12; LOC13 = (NI)0; LOC13 = 
len_297339_850551059(result0); LOC11 = (((NI) 0) < LOC13); LA12: ; if (!LOC11) goto LA14; { if (!((*result0).kind == ((Ttypekind294244) 11))) goto LA18; result0 = (*result0).sons->data[((NI) 1)]; } goto LA16; LA18: ; { result0 = elemtype_322394_3876443242(result0); } LA16: ; } LA14: ; res_536931_839829468 += ((NI) 1); } LA8: ; } } return result0; } N_NIMCALL(NimStringDesc*, manglefield_534973_839829468)(Tident201010* name0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; result0 = mangle_530847_2036603609((*name0).s); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = iskeyword_534960_839829468(name0); if (!LOC3) goto LA4; result0->data[((NI) 0)] = nsuToUpperAsciiChar(result0->data[((NI) 0)]); } LA4: ; return result0; } N_NIMCALL(Ropeobj180006*, manglerecfieldname_536361_839829468)(Tsym294834* field0, Ttype294840* rectype0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*rectype0).sym == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = !(((96 & (*(*rectype0).sym).flags) == 0)); LA4: ; if (!LOC3) goto LA5; result0 = (*field0).loc.r; } goto LA1; LA5: ; { NimStringDesc* LOC8; LOC8 = (NimStringDesc*)0; LOC8 = manglefield_534973_839829468((*field0).name); result0 = rope_180277_2381377266(LOC8); } LA1: ; { if (!(result0 == NIM_NIL)) goto LA11; internalerror_198100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96)); } LA11: ; return result0; } N_NIMCALL(Ropeobj180006*, genrecordfieldsaux_536421_839829468)(Tcgen531027* m0, Tnode294802* n0, Ropeobj180006* accessexpr0, Ttype294840* rectype0, Intset270030* check0) { Ropeobj180006* result0; Ropeobj180006* ae0; Ropeobj180006* uname0; Ropeobj180006* sname0; Ropeobj180006* a0; Tnode294802* k0; Tsym294834* field0; { result0 = (Ropeobj180006*)0; ae0 = (Ropeobj180006*)0; uname0 = (Ropeobj180006*)0; sname0 = (Ropeobj180006*)0; a0 = (Ropeobj180006*)0; k0 = (Tnode294802*)0; field0 = (Tsym294834*)0; result0 = NIM_NIL; switch ((*n0).kind) { case ((Tnodekind294020) 138): { { NI 
i_536447_839829468; NI HEX3Atmp_536620_839829468; NI LOC3; NI res_536623_839829468; i_536447_839829468 = (NI)0; HEX3Atmp_536620_839829468 = (NI)0; LOC3 = (NI)0; LOC3 = sonslen_297351_850551059(n0); HEX3Atmp_536620_839829468 = (NI)(LOC3 - ((NI) 1)); res_536623_839829468 = ((NI) 0); { while (1) { Ropeobj180006* LOC6; if (!(res_536623_839829468 <= HEX3Atmp_536620_839829468)) goto LA5; i_536447_839829468 = res_536623_839829468; LOC6 = (Ropeobj180006*)0; LOC6 = genrecordfieldsaux_536421_839829468(m0, (*n0).kindU.S6.sons->data[i_536447_839829468], accessexpr0, rectype0, check0); add_180482_2381377266(&result0, LOC6); res_536623_839829468 += ((NI) 1); } LA5: ; } } } break; case ((Tnodekind294020) 139): { Ropeobj180006* LOC12; NimStringDesc* LOC13; NimStringDesc* LOC14; Ropeobj180006* unionbody0; { if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)))) goto LA10; internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89)); } LA10: ; LOC12 = (Ropeobj180006*)0; LOC12 = genrecordfieldsaux_536421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0); add_180482_2381377266(&result0, LOC12); LOC13 = (NimStringDesc*)0; LOC14 = (NimStringDesc*)0; LOC14 = mangle_530847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s); LOC13 = rawNewString(LOC14->Sup.len + 1); appendString(LOC13, LOC14); appendChar(LOC13, 85); uname0 = rope_180277_2381377266(LOC13); { TY534811 LOC19; if (!!((accessexpr0 == NIM_NIL))) goto LA17; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = accessexpr0; LOC19[1] = uname0; ae0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2); } goto LA15; LA17: ; { ae0 = uname0; } LA15: ; unionbody0 = NIM_NIL; { NI i_536491_839829468; NI HEX3Atmp_536629_839829468; NI LOC22; NI res_536632_839829468; i_536491_839829468 = (NI)0; HEX3Atmp_536629_839829468 = (NI)0; LOC22 = (NI)0; LOC22 = sonslen_297351_850551059(n0); HEX3Atmp_536629_839829468 = (NI)(LOC22 - ((NI) 
/* NOTE(review): this file is machine-generated C produced by the Nim
 * compiler's C backend (nimcache); the `<ident>_<id>_<checksum>` mangling
 * and N_NIMCALL/Ropeobj/goto-label style identify it as compiler output,
 * apparently from compiler/ccgtypes.nim — confirm before hand-editing,
 * since regeneration will overwrite any change. Code below is byte-identical
 * to the original; only comments were added. String/format constants
 * T839829468_NN are opaque literals defined elsewhere in the file.
 *
 * The code up to the first closing `}` below is the TAIL of
 * genrecordfieldsaux_536421_... (its start lies before this chunk): it walks
 * the sons of a record-case node, recursing into each branch to build the
 * C union/struct body for variant record fields, then handles the plain
 * symbol-field case (bitfields, flexible arrays, weak typedescs). */
1)); res_536632_839829468 = ((NI) 1); { while (1) { if (!(res_536632_839829468 <= HEX3Atmp_536629_839829468)) goto LA24; i_536491_839829468 = res_536632_839829468; switch ((*(*n0).kindU.S6.sons->data[i_536491_839829468]).kind) { case ((Tnodekind294020) 85): case ((Tnodekind294020) 88): { k0 = lastson_297364_850551059((*n0).kindU.S6.sons->data[i_536491_839829468]); { Ropeobj180006* LOC30; TY534811 LOC31; Ropeobj180006* LOC32; if (!!(((*k0).kind == ((Tnodekind294020) 3)))) goto LA28; LOC30 = (Ropeobj180006*)0; LOC30 = rope_180401_2381377266(((NI64) (i_536491_839829468))); sname0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_91), LOC30); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = ae0; LOC31[1] = sname0; LOC32 = (Ropeobj180006*)0; LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2); a0 = genrecordfieldsaux_536421_839829468(m0, k0, LOC32, rectype0, check0); { TY180507 LOC37; if (!!((a0 == NIM_NIL))) goto LA35; add_180487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92)); add_180482_2381377266(&unionbody0, a0); memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = sname0; addf_181205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1); } LA35: ; } goto LA26; LA28: ; { Ropeobj180006* LOC39; LOC39 = (Ropeobj180006*)0; LOC39 = genrecordfieldsaux_536421_839829468(m0, k0, ae0, rectype0, check0); add_180482_2381377266(&unionbody0, LOC39); } LA26: ; } break; default: { internalerror_198113_155036129(((NimStringDesc*) &T839829468_94)); } break; } res_536632_839829468 += ((NI) 1); } LA24: ; } } { TY534811 LOC45; if (!!((unionbody0 == NIM_NIL))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = unionbody0; LOC45[1] = uname0; addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2); } LA43: ; } break; case ((Tnodekind294020) 3): { field0 = (*n0).kindU.S4.sym; { if (!((*(*field0).typ).kind == ((Ttypekind294244) 62))) goto LA49; goto BeforeRet; } LA49: ; sname0 = 
manglerecfieldname_536361_839829468(field0, rectype0); { TY534811 LOC55; if (!!((accessexpr0 == NIM_NIL))) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = accessexpr0; LOC55[1] = sname0; ae0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2); } goto LA51; LA53: ; { ae0 = sname0; } LA51: ; fillloc_534282_839829468((&(*field0).loc), ((Tlockind294808) 5), (*field0).typ, ae0, ((Tstorageloc294812) 0)); { NIM_BOOL LOC59; Ttype294840* fieldtype0; LOC59 = (NIM_BOOL)0; LOC59 = isimportedcpptype_535476_839829468(rectype0); if (!!(LOC59)) goto LA60; fieldtype0 = skiptypes_298099_850551059((*field0).loc.t, IL64(211106232576256)); { NIM_BOOL LOC64; TY534811 LOC68; Ttype294840* LOC69; LOC64 = (NIM_BOOL)0; LOC64 = ((*fieldtype0).kind == ((Ttypekind294244) 16)); if (!(LOC64)) goto LA65; LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0); LA65: ; if (!LOC64) goto LA66; memset((void*)LOC68, 0, sizeof(LOC68)); LOC69 = (Ttype294840*)0; LOC69 = elemtype_322394_3876443242(fieldtype0); LOC68[0] = gettypedescaux_535503_839829468(m0, LOC69, check0); LOC68[1] = sname0; addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2); } goto LA62; LA66: ; { TY534811 LOC73; if (!((*fieldtype0).kind == ((Ttypekind294244) 24))) goto LA71; memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = gettypedescweak_536079_839829468(m0, (*field0).loc.t, check0); LOC73[1] = sname0; addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2); } goto LA62; LA71: ; { TY537238 LOC77; NimStringDesc* LOC78; if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75; memset((void*)LOC77, 0, sizeof(LOC77)); LOC77[0] = gettypedescaux_535503_839829468(m0, (*field0).loc.t, check0); LOC77[1] = sname0; LOC78 = (NimStringDesc*)0; LOC78 = nimIntToStr((*field0).kindU.S4.bitsize); LOC77[2] = rope_180277_2381377266(LOC78); addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3); } goto LA62; LA75: ; { TY534811 
LOC80; memset((void*)LOC80, 0, sizeof(LOC80)); LOC80[0] = gettypedescaux_535503_839829468(m0, (*field0).loc.t, check0); LOC80[1] = sname0; addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2); } LA62: ; } LA60: ; } break; default: { internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99)); } break; } }BeforeRet: ; return result0; } 
/* getrecordfields: renders the C field-declaration list for a record type
 * by delegating to genrecordfieldsaux over the type's node tree (typ->n). */
N_NIMCALL(Ropeobj180006*, getrecordfields_536636_839829468)(Tcgen531027* m0, Ttype294840* typ0, Intset270030* check0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = genrecordfieldsaux_536421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0); return result0; } 
/* getrecorddesc: builds the full C struct/union declaration (as a Rope) for
 * a record type: struct-or-union keyword plus an optional compiler-specific
 * attribute (taken from the Cc_... compiler-config table when type flag 21,
 * presumably "packed", is set — confirm), then for object types (kind 17)
 * either a base-type embedding / RTTI header or nothing for pure/inheritance-
 * free objects, then the field list from getrecordfields. If no field was
 * emitted at all, a dummy member (format T839829468_100) keeps the struct
 * non-empty for C. Numeric enum ordinals (Ttypekind/Ttypeflag values) come
 * from the Nim side and are not recoverable from this chunk. */
N_NIMCALL(Ropeobj180006*, getrecorddesc_536643_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0, Intset270030* check0) { Ropeobj180006* result0; NIM_BOOL hasfield0; Ropeobj180006* attribute0; TY537238 LOC6; Ropeobj180006* desc0; NimStringDesc* LOC46; result0 = (Ropeobj180006*)0; hasfield0 = NIM_FALSE; { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 21))&31U)))!=0)) goto LA3; attribute0 = rope_180277_2381377266(Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field19); } goto LA1; LA3: ; { attribute0 = NIM_NIL; } LA1: ; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = structorunion_536001_839829468(typ0); LOC6[1] = name0; LOC6[2] = attribute0; result0 = ropecg_534407_839829468(m0, Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field18, LOC6, 3); { if (!((*typ0).kind == ((Ttypekind294244) 17))) goto LA9; { if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13; { NIM_BOOL LOC17; NIM_BOOL LOC18; TY535289 LOC23; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = !(((*typ0).sym == NIM_NIL)); if (!(LOC18)) goto LA19; LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0); LA19: ; LOC17 = LOC18; if (LOC17) goto LA20; LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 2))&31U)))!=0); LA20: ; if (!LOC17) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); 
appcg_534632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0); } goto LA15; LA21: ; { TY534811 LOC25; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = name0; LOC25[1] = attribute0; appcg_534632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2); hasfield0 = NIM_TRUE; } LA15: ; } goto LA11; LA13: ; { NIM_BOOL LOC27; TY180507 LOC31; Ttype294840* LOC32; LOC27 = (NIM_BOOL)0; LOC27 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC27) goto LA28; LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA28: ; if (!LOC27) goto LA29; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ttype294840*)0; LOC32 = skiptypes_298099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360)); LOC31[0] = gettypedescaux_535503_839829468(m0, LOC32, check0); appcg_534632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1); hasfield0 = NIM_TRUE; } goto LA11; LA29: ; { TY180507 LOC34; Ttype294840* LOC35; memset((void*)LOC34, 0, sizeof(LOC34)); LOC35 = (Ttype294840*)0; LOC35 = skiptypes_298099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360)); LOC34[0] = gettypedescaux_535503_839829468(m0, LOC35, check0); appcg_534632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1); hasfield0 = NIM_TRUE; } LA11: ; } goto LA7; LA9: ; { TY180507 LOC37; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = name0; addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1); } LA7: ; desc0 = getrecordfields_536636_839829468(m0, typ0, check0); { NIM_BOOL LOC40; TY535289 LOC44; LOC40 = (NIM_BOOL)0; LOC40 = (desc0 == NIM_NIL); if (!(LOC40)) goto LA41; LOC40 = !(hasfield0); LA41: ; if (!LOC40) goto LA42; memset((void*)LOC44, 0, sizeof(LOC44)); addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0); } goto LA38; LA42: ; { add_180482_2381377266(&result0, desc0); } LA38: ; LOC46 = (NimStringDesc*)0; LOC46 = rawNewString(tnl_178644_4151366050->Sup.len + 2); 
appendString(LOC46, ((NimStringDesc*) &T839829468_101)); appendString(LOC46, tnl_178644_4151366050); add_180487_2381377266(&result0, LOC46); return result0; } 
/* gettupledesc: like getrecorddesc but for tuple types: each son of the
 * type becomes one field whose name is derived from its index (rope of i,
 * via format T839829468_103); an empty tuple gets a dummy member. */
N_NIMCALL(Ropeobj180006*, gettupledesc_536777_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0, Intset270030* check0) { Ropeobj180006* result0; TY534811 LOC1; Ropeobj180006* desc0; NimStringDesc* LOC13; result0 = (Ropeobj180006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = structorunion_536001_839829468(typ0); LOC1[1] = name0; result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2); desc0 = NIM_NIL; { NI i_536799_839829468; NI HEX3Atmp_536820_839829468; NI LOC3; NI res_536823_839829468; i_536799_839829468 = (NI)0; HEX3Atmp_536820_839829468 = (NI)0; LOC3 = (NI)0; LOC3 = sonslen_297327_850551059(typ0); HEX3Atmp_536820_839829468 = (NI)(LOC3 - ((NI) 1)); res_536823_839829468 = ((NI) 0); { while (1) { TY534811 LOC6; if (!(res_536823_839829468 <= HEX3Atmp_536820_839829468)) goto LA5; i_536799_839829468 = res_536823_839829468; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = gettypedescaux_535503_839829468(m0, (*typ0).sons->data[i_536799_839829468], check0); LOC6[1] = rope_180401_2381377266(((NI64) (i_536799_839829468))); addf_181205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2); res_536823_839829468 += ((NI) 1); } LA5: ; } } { NimStringDesc* LOC11; if (!(desc0 == NIM_NIL)) goto LA9; LOC11 = (NimStringDesc*)0; LOC11 = rawNewString(tnl_178644_4151366050->Sup.len + 11); appendString(LOC11, ((NimStringDesc*) &T839829468_104)); appendString(LOC11, tnl_178644_4151366050); add_180487_2381377266(&result0, LOC11); } goto LA7; LA9: ; { add_180482_2381377266(&result0, desc0); } LA7: ; LOC13 = (NimStringDesc*)0; LOC13 = rawNewString(tnl_178644_4151366050->Sup.len + 2); appendString(LOC13, ((NimStringDesc*) &T839829468_101)); appendString(LOC13, tnl_178644_4151366050); add_180487_2381377266(&result0, LOC13); return result0; } 
/* Start of gettypedescaux (continues on the following lines). */
N_NIMCALL(Ropeobj180006*, 
/* gettypedescaux: the central recursive type-descriptor generator of this
 * machine-generated Nim C backend (nimcache output — do not hand-edit;
 * regeneration overwrites it; this doc_update changes comments only).
 * Maps a Nim type to the Rope holding its C type name, emitting any needed
 * typedefs / forward declarations into the module's output sections
 * (m->s[...]) and memoizing results in m->typecache / m->forwtypecache so
 * each unique type is declared once. A big switch dispatches on the unique
 * type's kind; the numeric kind ordinals are Nim-side enum values and their
 * symbolic names are not recoverable from this chunk. Visible sub-cases:
 * pointer-like kinds (star-suffixed element type, imported-C++ handling),
 * sequence/openarray-like kinds, enum kinds (sized typedef by getsize plus
 * debug-info registration via registerenum), proc types (calling-convention
 * typedef built from genprocparams output), forward-declared object kinds,
 * record/tuple kinds (delegating to getrecorddesc / gettupledesc), set-like
 * kinds (sized by getsize), and pass-through kinds that recurse into the
 * last son. The check0 int-set detects recursive types: an id already in
 * the set that is not an imported C++ type is an internal error. */
gettypedescaux_535503_839829468)(Tcgen531027* m0, Ttype294840* typ0, Intset270030* check0) { Ropeobj180006* result0; Ttype294840* t_536942_839829468; { result0 = (Ropeobj180006*)0; t_536942_839829468 = getuniquetype_530640_2036603609(typ0); { if (!(t_536942_839829468 == NIM_NIL)) goto LA3; internalerror_198113_155036129(((NimStringDesc*) &T839829468_27)); } LA3: ; { if (!!(((*t_536942_839829468).sym == NIM_NIL))) goto LA7; useheader_534369_839829468(m0, (*t_536942_839829468).sym); } LA7: ; result0 = gettypepre_535972_839829468(m0, t_536942_839829468); { if (!!((result0 == NIM_NIL))) goto LA11; goto BeforeRet; } LA11: ; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = containsorincl_270862_2627731572(check0, (*t_536942_839829468).Sup.id); if (!LOC15) goto LA16; { NIM_BOOL LOC20; NimStringDesc* LOC24; NimStringDesc* LOC25; LOC20 = (NIM_BOOL)0; LOC20 = isimportedcpptype_535476_839829468(typ0); if (LOC20) goto LA21; LOC20 = isimportedcpptype_535476_839829468(t_536942_839829468); LA21: ; if (!!(LOC20)) goto LA22; LOC24 = (NimStringDesc*)0; LOC25 = (NimStringDesc*)0; LOC25 = typetostring_322017_3876443242(typ0, ((Tprefereddesc322011) 0)); LOC24 = rawNewString(LOC25->Sup.len + 28); appendString(LOC24, ((NimStringDesc*) &T839829468_51)); appendString(LOC24, LOC25); internalerror_198113_155036129(LOC24); } LA22: ; } LA16: ; switch ((*t_536942_839829468).kind) { case ((Ttypekind294244) 22): case ((Ttypekind294244) 21): case ((Ttypekind294244) 23): { NimStringDesc* star0; Ttype294840* et0; Ttype294840* LOC38; Ttype294840* etb0; { NIM_BOOL LOC29; NIM_BOOL LOC30; NIM_BOOL LOC33; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((*t_536942_839829468).kind == ((Ttypekind294244) 23)); if (!(LOC30)) goto LA31; LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0)); LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA32; LOC33 = (NIM_BOOL)0; LOC33 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC33) goto LA34; LOC33 = (((*(*m0).module).flags 
&(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA34: ; LOC29 = LOC33; LA32: ; if (!LOC29) goto LA35; star0 = copyString(((NimStringDesc*) &T839829468_52)); } goto LA27; LA35: ; { star0 = copyString(((NimStringDesc*) &T839829468_53)); } LA27: ; LOC38 = (Ttype294840*)0; LOC38 = skiptypes_298099_850551059(typ0, IL64(211106232576256)); et0 = lastson_297377_850551059(LOC38); etb0 = skiptypes_298099_850551059(et0, IL64(211106232576256)); { if (!((*etb0).kind == ((Ttypekind294244) 4) || (*etb0).kind == ((Ttypekind294244) 16) || (*etb0).kind == ((Ttypekind294244) 27) || (*etb0).kind == ((Ttypekind294244) 48))) goto LA41; et0 = elemtype_322394_3876443242(etb0); etb0 = skiptypes_298099_850551059(et0, IL64(211106232576256)); star0->data[((NI) 0)] = 42; } LA41: ; switch ((*etb0).kind) { case ((Ttypekind294244) 17): case ((Ttypekind294244) 18): { { NIM_BOOL LOC46; Ropeobj180006* LOC50; LOC46 = (NIM_BOOL)0; LOC46 = isimportedcpptype_535476_839829468(etb0); if (!(LOC46)) goto LA47; LOC46 = ((*et0).kind == ((Ttypekind294244) 11)); LA47: ; if (!LOC46) goto LA48; LOC50 = (Ropeobj180006*)0; LOC50 = gettypedescaux_535503_839829468(m0, et0, check0); result0 = HEX26_180447_2381377266(LOC50, star0); } goto LA44; LA48: ; { Ttype294840* x0; Ropeobj180006* name0; Tidobj201004* LOC52; TNimObject* LOC53; x0 = getuniquetype_530640_2036603609(etb0); name0 = gettypeforward_536039_839829468(m0, x0); result0 = HEX26_180447_2381377266(name0, star0); LOC52 = (Tidobj201004*)0; LOC52 = &t_536942_839829468->Sup; LOC53 = (TNimObject*)0; LOC53 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC52, LOC53); pushtype_535958_839829468(m0, x0); } LA44: ; } break; case ((Ttypekind294244) 24): { Ttype294840* x0; Ropeobj180006* name0; Ropeobj180006* LOC55; Tidobj201004* LOC56; TNimObject* LOC57; x0 = getuniquetype_530640_2036603609(etb0); name0 = gettypeforward_536039_839829468(m0, x0); LOC55 = (Ropeobj180006*)0; LOC55 = HEX26_180447_2381377266(name0, ((NimStringDesc*) &T839829468_53)); result0 
= HEX26_180447_2381377266(LOC55, star0); LOC56 = (Tidobj201004*)0; LOC56 = &t_536942_839829468->Sup; LOC57 = (TNimObject*)0; LOC57 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC56, LOC57); pushtype_535958_839829468(m0, x0); } break; default: { Ropeobj180006* LOC59; Tidobj201004* LOC60; TNimObject* LOC61; LOC59 = (Ropeobj180006*)0; LOC59 = gettypedescaux_535503_839829468(m0, et0, check0); result0 = HEX26_180447_2381377266(LOC59, star0); LOC60 = (Tidobj201004*)0; LOC60 = &t_536942_839829468->Sup; LOC61 = (TNimObject*)0; LOC61 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC60, LOC61); } break; } } break; case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): { Ropeobj180006* LOC63; Tidobj201004* LOC64; TNimObject* LOC65; LOC63 = (Ropeobj180006*)0; LOC63 = gettypedescweak_536079_839829468(m0, (*t_536942_839829468).sons->data[((NI) 0)], check0); result0 = HEX26_180447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53)); LOC64 = (Tidobj201004*)0; LOC64 = &t_536942_839829468->Sup; LOC65 = (TNimObject*)0; LOC65 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC64, LOC65); } break; case ((Ttypekind294244) 20): case ((Ttypekind294244) 14): { Ttype294840* t0; { if (!((*t_536942_839829468).kind == ((Ttypekind294244) 20))) goto LA69; t0 = lastson_297377_850551059(t_536942_839829468); } goto LA67; LA69: ; { t0 = t_536942_839829468; } LA67: ; result0 = cachegettype_535591_839829468((*m0).typecache, t0); { if (!(result0 == NIM_NIL)) goto LA74; result0 = gettypename_535313_839829468(t0); { NIM_BOOL LOC78; NIM_BOOL LOC80; Tidobj201004* LOC84; TNimObject* LOC85; NI size0; NU32 owner0; LOC78 = (NIM_BOOL)0; LOC78 = isimportedcpptype_535476_839829468(t0); if (LOC78) goto LA79; LOC80 = (NIM_BOOL)0; LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0); if (!(LOC80)) goto LA81; LOC80 = ((*(*t0).sym).magic == ((Tmagic294524) 0)); LA81: ; LOC78 = LOC80; LA79: ; if (!!(LOC78)) goto LA82; LOC84 = 
(Tidobj201004*)0; LOC84 = &t0->Sup; LOC85 = (TNimObject*)0; LOC85 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC84, LOC85); size0 = (NI)0; { NI64 LOC88; TY180507 LOC91; LOC88 = (NI64)0; LOC88 = firstord_322001_3876443242(t0); if (!(LOC88 < IL64(0))) goto LA89; memset((void*)LOC91, 0, sizeof(LOC91)); LOC91[0] = result0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1); size0 = ((NI) 4); } goto LA86; LA89: ; { NI64 LOC93; LOC93 = (NI64)0; LOC93 = getsize_322135_3876443242(t0); size0 = ((NI) (LOC93)); switch (size0) { case ((NI) 1): { TY180507 LOC95; memset((void*)LOC95, 0, sizeof(LOC95)); LOC95[0] = result0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1); } break; case ((NI) 2): { TY180507 LOC97; memset((void*)LOC97, 0, sizeof(LOC97)); LOC97[0] = result0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1); } break; case ((NI) 4): { TY180507 LOC99; memset((void*)LOC99, 0, sizeof(LOC99)); LOC99[0] = result0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1); } break; case ((NI) 8): { TY180507 LOC101; memset((void*)LOC101, 0, sizeof(LOC101)); LOC101[0] = result0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1); } break; default: { internalerror_198100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63)); } break; } } LA86: ; owner0 = hashowner_534977_839829468((*t0).sym); { NIM_BOOL LOC105; TY205017* vals0; Enumdesc205007 LOC114; LOC105 = (NIM_BOOL)0; LOC105 = hasenum_205230_1926258066(gdebuginfo_205470_1926258066, (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0); if (!!(LOC105)) goto LA106; vals0 = (TY205017*) newSeq((&NTI205017), 0); { NI i_537144_839829468; NI HEX3Atmp_537648_839829468; NI LOC109; NI 
res_537651_839829468; i_537144_839829468 = (NI)0; HEX3Atmp_537648_839829468 = (NI)0; LOC109 = (NI)0; LOC109 = len_295081_850551059((*t0).n); HEX3Atmp_537648_839829468 = (NI)(LOC109 - ((NI) 1)); res_537651_839829468 = ((NI) 0); { while (1) { Tsym294834* field0; TY205018 LOC112; NimStringDesc* LOC113; if (!(res_537651_839829468 <= HEX3Atmp_537648_839829468)) goto LA111; i_537144_839829468 = res_537651_839829468; field0 = (*(*(*t0).n).kindU.S6.sons->data[i_537144_839829468]).kindU.S4.sym; memset((void*)(&LOC112), 0, sizeof(LOC112)); LOC112.Field0 = copyString((*(*field0).name).s); LOC112.Field1 = (*field0).position; vals0 = (TY205017*) incrSeqV2(&(vals0)->Sup, sizeof(TY205018)); LOC113 = (NimStringDesc*)0; LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0); if (LOC113) nimGCunrefNoCycle(LOC113); vals0->data[vals0->Sup.len].Field1 = LOC112.Field1; ++vals0->Sup.len; res_537651_839829468 += ((NI) 1); } LA111: ; } } memset((void*)(&LOC114), 0, sizeof(LOC114)); memset((void*)(&LOC114), 0, sizeof(LOC114)); LOC114.size = size0; LOC114.owner = owner0; LOC114.id = (*(*t0).sym).Sup.id; LOC114.name = copyString((*(*(*t0).sym).name).s); genericSeqAssign((&LOC114.values), vals0, (&NTI205017)); registerenum_205419_1926258066((&gdebuginfo_205470_1926258066), (&LOC114)); } LA106: ; } LA82: ; } LA74: ; } break; case ((Ttypekind294244) 25): { Tidobj201004* LOC116; TNimObject* LOC117; Ropeobj180006* rettype0; Ropeobj180006* desc0; result0 = gettypename_535313_839829468(t_536942_839829468); LOC116 = (Tidobj201004*)0; LOC116 = &t_536942_839829468->Sup; LOC117 = (TNimObject*)0; LOC117 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC116, LOC117); rettype0 = (Ropeobj180006*)0; desc0 = (Ropeobj180006*)0; genprocparams_536115_839829468(m0, t_536942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE); { NIM_BOOL LOC120; LOC120 = (NIM_BOOL)0; LOC120 = isimportedtype_535449_839829468(t_536942_839829468); if 
(!!(LOC120)) goto LA121; { TY537235 LOC127; if (!!(((*t_536942_839829468).callconv == ((Tcallingconvention294002) 8)))) goto LA125; memset((void*)LOC127, 0, sizeof(LOC127)); LOC127[0] = rope_180277_2381377266(Callingconvtostr_535585_839829468[((*t_536942_839829468).callconv)- 0]); LOC127[1] = rettype0; LOC127[2] = result0; LOC127[3] = desc0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4); } goto LA123; LA125: ; { TY537238 LOC129; memset((void*)LOC129, 0, sizeof(LOC129)); LOC129[0] = result0; LOC129[1] = rettype0; LOC129[2] = desc0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3); } LA123: ; } LA121: ; } break; case ((Ttypekind294244) 24): { Tidobj201004* LOC144; Ropeobj180006* LOC145; TNimObject* LOC146; result0 = cachegettype_535591_839829468((*m0).forwtypecache, t_536942_839829468); { Tidobj201004* LOC142; TNimObject* LOC143; if (!(result0 == NIM_NIL)) goto LA133; result0 = gettypename_535313_839829468(t_536942_839829468); { NIM_BOOL LOC137; NimStringDesc* LOC140; TY534811 LOC141; LOC137 = (NIM_BOOL)0; LOC137 = isimportedtype_535449_839829468(t_536942_839829468); if (!!(LOC137)) goto LA138; LOC140 = (NimStringDesc*)0; LOC140 = getforwardstructformat_536015_839829468(m0); memset((void*)LOC141, 0, sizeof(LOC141)); LOC141[0] = structorunion_536001_839829468(t_536942_839829468); LOC141[1] = result0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 2))- 0], LOC140, LOC141, 2); } LA138: ; LOC142 = (Tidobj201004*)0; LOC142 = &t_536942_839829468->Sup; LOC143 = (TNimObject*)0; LOC143 = &result0->Sup; idtableput_301094_2984716966((&(*m0).forwtypecache), LOC142, LOC143); } LA133: ; LOC144 = (Tidobj201004*)0; LOC144 = &t_536942_839829468->Sup; LOC145 = (Ropeobj180006*)0; LOC145 = HEX26_180447_2381377266(result0, ((NimStringDesc*) &T839829468_53)); LOC146 = (TNimObject*)0; LOC146 = &LOC145->Sup; idtableput_301094_2984716966((&(*m0).typecache), 
LOC144, LOC146); { NIM_BOOL LOC149; LOC149 = (NIM_BOOL)0; LOC149 = isimportedtype_535449_839829468(t_536942_839829468); if (!!(LOC149)) goto LA150; { Ttype294840* LOC154; NimStringDesc* LOC157; NimStringDesc* LOC158; TY534811 LOC166; LOC154 = (Ttype294840*)0; LOC154 = skiptypes_298099_850551059((*t_536942_839829468).sons->data[((NI) 0)], IL64(211106232576256)); if (!!(((*LOC154).kind == ((Ttypekind294244) 3)))) goto LA155; LOC157 = (NimStringDesc*)0; LOC158 = (NimStringDesc*)0; { NIM_BOOL LOC161; LOC161 = (NIM_BOOL)0; LOC161 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC161) goto LA162; LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA162: ; if (!LOC161) goto LA163; LOC158 = copyString(((NimStringDesc*) &T839829468_76)); } goto LA159; LA163: ; { LOC158 = copyString(((NimStringDesc*) &T839829468_77)); } LA159: ; LOC157 = rawNewString(LOC158->Sup.len + 31); appendString(LOC157, LOC158); appendString(LOC157, ((NimStringDesc*) &T839829468_78)); memset((void*)LOC166, 0, sizeof(LOC166)); LOC166[0] = gettypedescaux_535503_839829468(m0, (*t_536942_839829468).sons->data[((NI) 0)], check0); LOC166[1] = result0; appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 4))- 0], LOC157, LOC166, 2); } goto LA152; LA155: ; { result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_79)); } LA152: ; } LA150: ; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_53)); } break; case ((Ttypekind294244) 4): case ((Ttypekind294244) 16): { NI64 n0; Tidobj201004* LOC173; TNimObject* LOC174; n0 = lengthord_322007_3876443242(t_536942_839829468); { if (!(n0 <= IL64(0))) goto LA171; n0 = IL64(1); } LA171: ; result0 = gettypename_535313_839829468(t_536942_839829468); LOC173 = (Tidobj201004*)0; LOC173 = &t_536942_839829468->Sup; LOC174 = (TNimObject*)0; LOC174 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC173, LOC174); { NIM_BOOL LOC177; Ropeobj180006* foo0; TY537238 LOC180; LOC177 = (NIM_BOOL)0; LOC177 = 
isimportedtype_535449_839829468(t_536942_839829468); if (!!(LOC177)) goto LA178; foo0 = gettypedescaux_535503_839829468(m0, (*t_536942_839829468).sons->data[((NI) 1)], check0); memset((void*)LOC180, 0, sizeof(LOC180)); LOC180[0] = foo0; LOC180[1] = result0; LOC180[2] = rope_180401_2381377266(n0); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3); } LA178: ; } break; case ((Ttypekind294244) 17): case ((Ttypekind294244) 18): { { NIM_BOOL LOC184; Ropeobj180006* cppname0; NI i0; NI chunkstart0; Ropeobj180006* LOC226; LOC184 = (NIM_BOOL)0; LOC184 = isimportedcpptype_535476_839829468(t_536942_839829468); if (!(LOC184)) goto LA185; LOC184 = ((*typ0).kind == ((Ttypekind294244) 11)); LA185: ; if (!LOC184) goto LA186; cppname0 = gettypename_535313_839829468(t_536942_839829468); i0 = ((NI) 0); chunkstart0 = ((NI) 0); { while (1) { if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189; { NI chunkend0; NI idx0; NI stars0; if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192; chunkend0 = (i0 - 1); idx0 = (NI)0; stars0 = (NI)0; { NIM_BOOL LOC196; NimStringDesc* LOC199; Ttype294840* typeinslot0; LOC196 = (NIM_BOOL)0; LOC196 = scancppgenericslot_536827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0)); if (!LOC196) goto LA197; LOC199 = (NimStringDesc*)0; LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0); add_180487_2381377266(&result0, LOC199); chunkstart0 = i0; typeinslot0 = resolvestarsincpptype_536891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0); { NIM_BOOL LOC202; TY535289 LOC206; Ropeobj180006* LOC207; LOC202 = (NIM_BOOL)0; LOC202 = (typeinslot0 == NIM_NIL); if (LOC202) goto LA203; LOC202 = ((*typeinslot0).kind == ((Ttypekind294244) 62)); LA203: ; if (!LOC202) goto LA204; memset((void*)LOC206, 0, sizeof(LOC206)); LOC207 = (Ropeobj180006*)0; LOC207 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0); add_180482_2381377266(&result0, LOC207); } 
goto LA200; LA204: ; { Ropeobj180006* LOC209; LOC209 = (Ropeobj180006*)0; LOC209 = gettypedescaux_535503_839829468(m0, typeinslot0, check0); add_180482_2381377266(&result0, LOC209); } LA200: ; } LA197: ; } goto LA190; LA192: ; { i0 += ((NI) 1); } LA190: ; } LA189: ; } { NimStringDesc* LOC215; if (!!((chunkstart0 == ((NI) 0)))) goto LA213; LOC215 = (NimStringDesc*)0; LOC215 = copyStr((*cppname0).data, chunkstart0); add_180487_2381377266(&result0, LOC215); } goto LA211; LA213: ; { result0 = HEX26_180447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82)); { NI i_537516_839829468; NI HEX3Atmp_537664_839829468; NI LOC218; NI res_537667_839829468; i_537516_839829468 = (NI)0; HEX3Atmp_537664_839829468 = (NI)0; LOC218 = (NI)0; LOC218 = len_297339_850551059(typ0); HEX3Atmp_537664_839829468 = (NI)(LOC218 - ((NI) 2)); res_537667_839829468 = ((NI) 1); { while (1) { Ropeobj180006* LOC225; if (!(res_537667_839829468 <= HEX3Atmp_537664_839829468)) goto LA220; i_537516_839829468 = res_537667_839829468; { if (!(((NI) 1) < i_537516_839829468)) goto LA223; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_83)); } LA223: ; LOC225 = (Ropeobj180006*)0; LOC225 = gettypedescaux_535503_839829468(m0, (*typ0).sons->data[i_537516_839829468], check0); add_180482_2381377266(&result0, LOC225); res_537667_839829468 += ((NI) 1); } LA220: ; } } add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_84)); } LA211: ; LOC226 = (Ropeobj180006*)0; LOC226 = getrecorddesc_536643_839829468(m0, t_536942_839829468, result0, check0); } goto LA182; LA186: ; { Tidobj201004* LOC241; TNimObject* LOC242; Ropeobj180006* recdesc0; result0 = cachegettype_535591_839829468((*m0).forwtypecache, t_536942_839829468); { Tidobj201004* LOC239; TNimObject* LOC240; if (!(result0 == NIM_NIL)) goto LA230; result0 = gettypename_535313_839829468(t_536942_839829468); { NIM_BOOL LOC234; NimStringDesc* LOC237; TY534811 LOC238; LOC234 = (NIM_BOOL)0; LOC234 = isimportedtype_535449_839829468(t_536942_839829468); 
if (!!(LOC234)) goto LA235; LOC237 = (NimStringDesc*)0; LOC237 = getforwardstructformat_536015_839829468(m0); memset((void*)LOC238, 0, sizeof(LOC238)); LOC238[0] = structorunion_536001_839829468(t_536942_839829468); LOC238[1] = result0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 2))- 0], LOC237, LOC238, 2); } LA235: ; LOC239 = (Tidobj201004*)0; LOC239 = &t_536942_839829468->Sup; LOC240 = (TNimObject*)0; LOC240 = &result0->Sup; idtableput_301094_2984716966((&(*m0).forwtypecache), LOC239, LOC240); } LA230: ; LOC241 = (Tidobj201004*)0; LOC241 = &t_536942_839829468->Sup; LOC242 = (TNimObject*)0; LOC242 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC241, LOC242); { if (!!(((*t_536942_839829468).kind == ((Ttypekind294244) 18)))) goto LA245; recdesc0 = getrecorddesc_536643_839829468(m0, t_536942_839829468, result0, check0); } goto LA243; LA245: ; { recdesc0 = gettupledesc_536777_839829468(m0, t_536942_839829468, result0, check0); } LA243: ; { NIM_BOOL LOC250; LOC250 = (NIM_BOOL)0; LOC250 = isimportedtype_535449_839829468(t_536942_839829468); if (!!(LOC250)) goto LA251; add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], recdesc0); } LA251: ; } LA182: ; } break; case ((Ttypekind294244) 19): { Ttype294840* LOC254; Ropeobj180006* LOC255; Tidobj201004* LOC256; TNimObject* LOC257; LOC254 = (Ttype294840*)0; LOC254 = lastson_297377_850551059(t_536942_839829468); LOC255 = (Ropeobj180006*)0; LOC255 = gettypename_535313_839829468(LOC254); result0 = HEX26_180447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105)); LOC256 = (Tidobj201004*)0; LOC256 = &t_536942_839829468->Sup; LOC257 = (TNimObject*)0; LOC257 = &result0->Sup; idtableput_301094_2984716966((&(*m0).typecache), LOC256, LOC257); { NIM_BOOL LOC260; NI s0; NI64 LOC263; LOC260 = (NIM_BOOL)0; LOC260 = isimportedtype_535449_839829468(t_536942_839829468); if (!!(LOC260)) goto LA261; LOC263 = (NI64)0; LOC263 = getsize_322135_3876443242(t_536942_839829468); s0 = ((NI) (LOC263)); 
switch (s0) { case ((NI) 1): case ((NI) 2): case ((NI) 4): case ((NI) 8): { TY534811 LOC265; memset((void*)LOC265, 0, sizeof(LOC265)); LOC265[0] = result0; LOC265[1] = rope_180401_2381377266(((NI64) ((NI)(s0 * ((NI) 8))))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2); } break; default: { TY534811 LOC267; NI64 LOC268; memset((void*)LOC267, 0, sizeof(LOC267)); LOC267[0] = result0; LOC268 = (NI64)0; LOC268 = getsize_322135_3876443242(t_536942_839829468); LOC267[1] = rope_180401_2381377266(LOC268); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2); } break; } } LA261: ; } break; case ((Ttypekind294244) 11): case ((Ttypekind294244) 13): case ((Ttypekind294244) 15): case ((Ttypekind294244) 46): case ((Ttypekind294244) 47): case ((Ttypekind294244) 49): case ((Ttypekind294244) 8): { Ttype294840* LOC270; LOC270 = (Ttype294840*)0; LOC270 = lastson_297377_850551059(t_536942_839829468); result0 = gettypedescaux_535503_839829468(m0, LOC270, check0); } break; default: { NimStringDesc* LOC272; LOC272 = (NimStringDesc*)0; LOC272 = rawNewString(reprEnum((NI)(*t_536942_839829468).kind, (&NTI294244))->Sup.len + 16); appendString(LOC272, ((NimStringDesc*) &T839829468_108)); appendString(LOC272, reprEnum((NI)(*t_536942_839829468).kind, (&NTI294244))); appendChar(LOC272, 41); internalerror_198113_155036129(LOC272); result0 = NIM_NIL; } break; } excl_270841_2627731572(check0, (*t_536942_839829468).Sup.id); }BeforeRet: ; return result0; } 
/* iscompiletimeonly: true exactly for type kinds 8 and 59 — per the Nim-side
 * name these are compile-time-only types that produce no runtime C code. */
static N_INLINE(NIM_BOOL, iscompiletimeonly_330706_3876443242)(Ttype294840* t0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((*t0).kind == ((Ttypekind294244) 8) || (*t0).kind == ((Ttypekind294244) 59)); return result0; } 
/* paramstorageloc: picks the storage-location enum for a parameter — ordinal 2
 * for most kinds, 0 for array/openarray-like kinds (16, 27, 48, 4) after
 * skipping abstraction types. Exact enum names are not recoverable here. */
N_NIMCALL(Tstorageloc294812, paramstorageloc_536098_839829468)(Tsym294834* param0) { Tstorageloc294812 result0; result0 = (Tstorageloc294812)0; { Ttype294840* LOC3; LOC3 = (Ttype294840*)0; LOC3 = 
skiptypes_298099_850551059((*param0).typ, 8388864); if (!!(((*LOC3).kind == ((Ttypekind294244) 16) || (*LOC3).kind == ((Ttypekind294244) 27) || (*LOC3).kind == ((Ttypekind294244) 48) || (*LOC3).kind == ((Ttypekind294244) 4)))) goto LA4; result0 = ((Tstorageloc294812) 2); } goto LA1; LA4: ; { result0 = ((Tstorageloc294812) 0); } LA1: ; return result0; } 
/* ccgintroducedptr: decides whether the C backend passes this parameter via a
 * hidden pointer: forced on/off by type flags 13/12, otherwise true for
 * object types (kind 17) larger than 2*floatsize bytes (or when sym option 18
 * is set) unless the object is a field-less pure base; similarly for tuples
 * (kind 18) by the same size test; false for everything else. */
N_NIMCALL(NIM_BOOL, ccgintroducedptr_535609_839829468)(Tsym294834* s0) { NIM_BOOL result0; Ttype294840* pt0; { result0 = (NIM_BOOL)0; pt0 = skiptypes_298099_850551059((*s0).typ, IL64(211106232576256)); { if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag294431) 13))&31U)))!=0)) goto LA3; result0 = NIM_TRUE; goto BeforeRet; } goto LA1; LA3: ; { if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag294431) 12))&31U)))!=0)) goto LA6; result0 = NIM_FALSE; goto BeforeRet; } goto LA1; LA6: ; LA1: ; switch ((*pt0).kind) { case ((Ttypekind294244) 17): { { NIM_BOOL LOC11; NI64 LOC13; LOC11 = (NIM_BOOL)0; LOC11 = (((*s0).options &(1U<<((NU)(((Toption171009) 18))&31U)))!=0); if (LOC11) goto LA12; LOC13 = (NI64)0; LOC13 = getsize_322135_3876443242(pt0); LOC11 = (((NI64) ((NI)(floatsize_178642_4151366050 * ((NI) 2)))) < LOC13); LA12: ; if (!LOC11) goto LA14; result0 = NIM_TRUE; } goto LA9; LA14: ; { NIM_BOOL LOC17; LOC17 = (NIM_BOOL)0; LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag294431) 2))&31U)))!=0); if (!(LOC17)) goto LA18; LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL); LA18: ; if (!LOC17) goto LA19; result0 = NIM_FALSE; } goto LA9; LA19: ; { result0 = NIM_TRUE; } LA9: ; } break; case ((Ttypekind294244) 18): { NIM_BOOL LOC23; NI64 LOC24; LOC23 = (NIM_BOOL)0; LOC24 = (NI64)0; LOC24 = getsize_322135_3876443242(pt0); LOC23 = (((NI64) ((NI)(floatsize_178642_4151366050 * ((NI) 2)))) < LOC24); if (LOC23) goto LA25; LOC23 = (((*s0).options &(1U<<((NU)(((Toption171009) 18))&31U)))!=0); LA25: ; result0 = LOC23; } break; default: { result0 = NIM_FALSE; } break; } }BeforeRet: ; return result0; } 
/* Start of mapreturntype (continues on the following lines). */
N_NIMCALL(Tctypekind531007, 
/* mapreturntype: thin wrapper over maptype for return positions (machine-
 * generated Nim C backend output — comments only added here; code is
 * byte-identical to the original and will be overwritten on regeneration). */
mapreturntype_535445_839829468)(Ttype294840* typ0) { Tctypekind531007 result0; result0 = (Tctypekind531007)0; result0 = maptype_535393_839829468(typ0); return result0; } 
/* genprocparams: renders a proc type's C return type (*rettype0) and
 * parenthesized parameter list (*params0). Handles: invalid return types
 * (rendered via format T839829468_26 and returned instead through a trailing
 * result pointer parameter, format T839829468_113); parameters that are
 * compile-time-only (skipped); by-hidden-pointer parameters
 * (ccgintroducedptr — type rendered weakly plus a "*", loc flag 0 set);
 * open-array-like parameters (kinds 27/48), where the inner while loop adds
 * one extra companion parameter per nesting level via format T839829468_112
 * (exact text of that format string is not visible in this chunk); a hidden
 * closure-environment parameter when callconv == 8 and declareenvironment0;
 * and a varargs tail when type flag 0 is set. An empty list is rendered via
 * T839829468_116, otherwise closed with T839829468_117, and the whole list
 * is prefixed via T839829468_118. unsureAsgnRef writes through the out
 * parameters with GC write barriers. */
N_NIMCALL(void, genprocparams_536115_839829468)(Tcgen531027* m0, Ttype294840* t0, Ropeobj180006** rettype0, Ropeobj180006** params0, Intset270030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) { unsureAsgnRef((void**) (&(*params0)), NIM_NIL); { NIM_BOOL LOC3; TY535289 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL); if (LOC3) goto LA4; LOC3 = isinvalidreturntype_535548_839829468((*t0).sons->data[((NI) 0)]); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); unsureAsgnRef((void**) (&(*rettype0)), HEX25_180905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0)); } goto LA1; LA5: ; { unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_535503_839829468(m0, (*t0).sons->data[((NI) 0)], check0)); } LA1: ; { NI i_536152_839829468; NI HEX3Atmp_536353_839829468; NI LOC10; NI res_536356_839829468; i_536152_839829468 = (NI)0; HEX3Atmp_536353_839829468 = (NI)0; LOC10 = (NI)0; LOC10 = sonslen_297351_850551059((*t0).n); HEX3Atmp_536353_839829468 = (NI)(LOC10 - ((NI) 1)); res_536356_839829468 = ((NI) 1); { while (1) { if (!(res_536356_839829468 <= HEX3Atmp_536353_839829468)) goto LA12; i_536152_839829468 = res_536356_839829468; { Tsym294834* param0; Ropeobj180006* LOC29; Tstorageloc294812 LOC30; TY535289 LOC45; Ropeobj180006* LOC46; Ttype294840* arr0; NI j0; { if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_536152_839829468]).kind == ((Tnodekind294020) 3)))) goto LA16; internalerror_198100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109)); } LA16: ; param0 = (*(*(*t0).n).kindU.S6.sons->data[i_536152_839829468]).kindU.S4.sym; { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = iscompiletimeonly_330706_3876443242((*param0).typ); if (!LOC20) goto LA21; goto LA13; } LA21: ; { TY535289 LOC27; Ropeobj180006* LOC28; if (!!(((*params0) == NIM_NIL))) goto LA25; 
memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Ropeobj180006*)0; LOC28 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0); add_180482_2381377266(params0, LOC28); } LA25: ; LOC29 = (Ropeobj180006*)0; LOC29 = manglename_535205_839829468(param0); LOC30 = (Tstorageloc294812)0; LOC30 = paramstorageloc_536098_839829468(param0); fillloc_534282_839829468((&(*param0).loc), ((Tlockind294808) 4), (*param0).typ, LOC29, LOC30); { NIM_BOOL LOC33; Ropeobj180006* LOC36; TY535289 LOC37; Ropeobj180006* LOC38; LOC33 = (NIM_BOOL)0; LOC33 = ccgintroducedptr_535609_839829468(param0); if (!LOC33) goto LA34; LOC36 = (Ropeobj180006*)0; LOC36 = gettypedescweak_536079_839829468(m0, (*param0).typ, check0); add_180482_2381377266(params0, LOC36); memset((void*)LOC37, 0, sizeof(LOC37)); LOC38 = (Ropeobj180006*)0; LOC38 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0); add_180482_2381377266(params0, LOC38); (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag294810) 0))%(sizeof(NU16)*8)); (*param0).loc.s = ((Tstorageloc294812) 0); } goto LA31; LA34: ; { Ropeobj180006* LOC42; if (!weakdep0) goto LA40; LOC42 = (Ropeobj180006*)0; LOC42 = gettypedescweak_536079_839829468(m0, (*param0).typ, check0); add_180482_2381377266(params0, LOC42); } goto LA31; LA40: ; { Ropeobj180006* LOC44; LOC44 = (Ropeobj180006*)0; LOC44 = gettypedescaux_535503_839829468(m0, (*param0).typ, check0); add_180482_2381377266(params0, LOC44); } LA31: ; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (Ropeobj180006*)0; LOC46 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0); add_180482_2381377266(params0, LOC46); add_180482_2381377266(params0, (*param0).loc.r); arr0 = (*param0).typ; { if (!((*arr0).kind == ((Ttypekind294244) 23))) goto LA49; arr0 = (*arr0).sons->data[((NI) 0)]; } LA49: ; j0 = ((NI) 0); { while (1) { TY534811 LOC57; if (!((*arr0).kind == ((Ttypekind294244) 27) || (*arr0).kind == ((Ttypekind294244) 48))) goto LA52; { if (!((*(*param0).typ).kind == 
((Ttypekind294244) 23))) goto LA55; (*param0).loc.s = ((Tstorageloc294812) 0); } LA55: ; memset((void*)LOC57, 0, sizeof(LOC57)); LOC57[0] = (*param0).loc.r; LOC57[1] = rope_180401_2381377266(((NI64) (j0))); addf_181205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2); j0 += ((NI) 1); arr0 = (*arr0).sons->data[((NI) 0)]; } LA52: ; } } LA13: ; res_536356_839829468 += ((NI) 1); } LA12: ; } } { NIM_BOOL LOC60; Ttype294840* arr0; TY535289 LOC76; LOC60 = (NIM_BOOL)0; LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); if (!(LOC60)) goto LA61; LOC60 = isinvalidreturntype_535548_839829468((*t0).sons->data[((NI) 0)]); LA61: ; if (!LOC60) goto LA62; arr0 = (*t0).sons->data[((NI) 0)]; { if (!!(((*params0) == NIM_NIL))) goto LA66; add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA66: ; { Tctypekind531007 LOC70; Ropeobj180006* LOC73; LOC70 = (Tctypekind531007)0; LOC70 = mapreturntype_535445_839829468((*t0).sons->data[((NI) 0)]); if (!!((LOC70 == ((Tctypekind531007) 17)))) goto LA71; LOC73 = (Ropeobj180006*)0; LOC73 = gettypedescweak_536079_839829468(m0, arr0, check0); add_180482_2381377266(params0, LOC73); add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_53)); } goto LA68; LA71: ; { Ropeobj180006* LOC75; LOC75 = (Ropeobj180006*)0; LOC75 = gettypedescaux_535503_839829468(m0, arr0, check0); add_180482_2381377266(params0, LOC75); } LA68: ; memset((void*)LOC76, 0, sizeof(LOC76)); addf_181205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0); } LA62: ; { NIM_BOOL LOC79; LOC79 = (NIM_BOOL)0; LOC79 = ((*t0).callconv == ((Tcallingconvention294002) 8)); if (!(LOC79)) goto LA80; LOC79 = declareenvironment0; LA80: ; if (!LOC79) goto LA81; { if (!!(((*params0) == NIM_NIL))) goto LA85; add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA85: ; add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_114)); } LA81: ; { if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0)) goto LA89; { if 
(!!(((*params0) == NIM_NIL))) goto LA93; add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA93: ; add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_115)); } LA89: ; { if (!((*params0) == NIM_NIL)) goto LA97; add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_116)); } goto LA95; LA97: ; { add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_117)); } LA95: ; unsureAsgnRef((void**) (&(*params0)), HEX26_180452_2381377266(((NimStringDesc*) &T839829468_118), (*params0))); } 
/* genprocheader: emits the C declaration header for a proc symbol (calling-
 * convention / dllexport-import prefix by loc flag 5 and codegen flag 3,
 * then name mangling + genprocparams). TRUNCATED — the body continues past
 * the end of this chunk; left exactly as found. */
N_NIMCALL(Ropeobj180006*, genprocheader_537867_839829468)(Tcgen531027* m0, Tsym294834* prc0) { Ropeobj180006* result0; Ropeobj180006* rettype0; Ropeobj180006* params0; Intset270030 check0; Ropeobj180006* LOC13; result0 = (Ropeobj180006*)0; rettype0 = (Ropeobj180006*)0; params0 = (Ropeobj180006*)0; genclinedir_534813_839829468(&result0, (*prc0).info); { if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 5))&15U)))!=0)) goto LA3; { if (!(((*m0).flags &(1U<<((NU)(((Codegenflag531025) 3))&7U)))!=0)) goto LA7; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_22)); } goto LA5; LA7: ; { add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_23)); } LA5: ; } goto LA1; LA3: ; { if (!((*(*prc0).typ).callconv == ((Tcallingconvention294002) 5))) goto LA11; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_24)); } goto LA1; LA11: ; LA1: ; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_270885_2627731572((&check0)); LOC13 = (Ropeobj180006*)0; LOC13 = manglename_535205_839829468(prc0); fillloc_534282_839829468((&(*prc0).loc), ((Tlockind294808) 7), (*prc0).typ, LOC13, ((Tstorageloc294812) 0)); genprocparams_536115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE); { TY537235 LOC18; if (!(*prc0).constraint == 0) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = 
rope_180277_2381377266(Callingconvtostr_535585_839829468[((*(*prc0).typ).callconv)- 0]); LOC18[1] = rettype0; LOC18[2] = (*prc0).loc.r; LOC18[3] = params0; addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4); } goto LA14; LA16: ; { TY537238 LOC20; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rettype0; LOC20[1] = (*prc0).loc.r; LOC20[2] = params0; result0 = HEX25_180905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3); } LA14: ; return result0; } static N_INLINE(Tnode294802*, HEX5BHEX5D_295238_850551059)(Tnode294802* n0, NI i0) { Tnode294802* result0; result0 = (Tnode294802*)0; result0 = (*n0).kindU.S6.sons->data[i0]; return result0; } N_NIMCALL(Tnode294802*, easyresultasgn_562191_839829468)(Tnode294802* n0) { Tnode294802* result0; { result0 = (Tnode294802*)0; switch ((*n0).kind) { case ((Tnodekind294020) 115): case ((Tnodekind294020) 126): { NI i0; i0 = ((NI) 0); { while (1) { NIM_BOOL LOC4; NI LOC5; Tnode294802* LOC7; LOC4 = (NIM_BOOL)0; LOC5 = (NI)0; LOC5 = len_295081_850551059(n0); LOC4 = (i0 < LOC5); if (!(LOC4)) goto LA6; LOC7 = (Tnode294802*)0; LOC7 = HEX5BHEX5D_295238_850551059(n0, i0); LOC4 = ((*LOC7).kind == ((Tnodekind294020) 1) || (*LOC7).kind >= ((Tnodekind294020) 79) && (*LOC7).kind <= ((Tnodekind294020) 81) || (*LOC7).kind == ((Tnodekind294020) 84) || (*LOC7).kind == ((Tnodekind294020) 98) || (*LOC7).kind == ((Tnodekind294020) 101) || (*LOC7).kind == ((Tnodekind294020) 125)); LA6: ; if (!LOC4) goto LA3; i0 += ((NI) 1); } LA3: ; } { NI LOC10; Tnode294802* LOC13; LOC10 = (NI)0; LOC10 = len_295081_850551059(n0); if (!(i0 < LOC10)) goto LA11; LOC13 = (Tnode294802*)0; LOC13 = HEX5BHEX5D_295238_850551059(n0, i0); result0 = easyresultasgn_562191_839829468(LOC13); } LA11: ; } break; case ((Tnodekind294020) 73): case ((Tnodekind294020) 74): { { NIM_BOOL LOC17; Tnode294802* LOC18; Tnode294802* LOC20; LOC17 = (NIM_BOOL)0; LOC18 = (Tnode294802*)0; LOC18 = HEX5BHEX5D_295238_850551059(n0, ((NI) 0)); LOC17 = ((*LOC18).kind 
== ((Tnodekind294020) 3)); if (!(LOC17)) goto LA19; LOC20 = (Tnode294802*)0; LOC20 = HEX5BHEX5D_295238_850551059(n0, ((NI) 0)); LOC17 = (((Tsymkind294435) 11) == (*(*LOC20).kindU.S4.sym).kind); LA19: ; if (!LOC17) goto LA21; (*n0).flags |= ((NU16)1)<<((((Tnodeflag294427) 14))%(sizeof(NU16)*8)); result0 = HEX5BHEX5D_295238_850551059(n0, ((NI) 1)); goto BeforeRet; } LA21: ; } break; case ((Tnodekind294020) 109): { { NI LOC26; Tnode294802* LOC29; LOC26 = (NI)0; LOC26 = len_295081_850551059(n0); if (!(((NI) 0) < LOC26)) goto LA27; LOC29 = (Tnode294802*)0; LOC29 = HEX5BHEX5D_295238_850551059(n0, ((NI) 0)); result0 = easyresultasgn_562191_839829468(LOC29); { if (!!((result0 == NIM_NIL))) goto LA32; (*n0).flags |= ((NU16)1)<<((((Tnodeflag294427) 14))%(sizeof(NU16)*8)); } LA32: ; } LA27: ; } break; default: { } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj180006*, gettypedesc_537671_839829468)(Tcgen531027* m0, Ttype294840* typ0) { Ropeobj180006* result0; Intset270030 check0; result0 = (Ropeobj180006*)0; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_270885_2627731572((&check0)); result0 = gettypedescaux_535503_839829468(m0, typ0, (&check0)); return result0; } N_NIMCALL(Ropeobj180006*, localvardecl_540532_839829468)(Tcproc531021* p0, Tsym294834* s0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { Ropeobj180006* LOC5; if (!((*s0).loc.k == ((Tlockind294808) 0))) goto LA3; LOC5 = (Ropeobj180006*)0; LOC5 = manglename_535205_839829468(s0); fillloc_534282_839829468((&(*s0).loc), ((Tlockind294808) 2), (*s0).typ, LOC5, ((Tstorageloc294812) 2)); { if (!((*s0).kind == ((Tsymkind294435) 9))) goto LA8; (*s0).loc.flags |= ((NU16)1)<<((((Tlocflag294810) 2))%(sizeof(NU16)*8)); } LA8: ; } LA3: ; result0 = gettypedesc_537671_839829468((*p0).module, (*s0).loc.t); { if (!(*s0).constraint == 0) goto LA12; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 8))&31U)))!=0)) goto LA16; 
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_121)); } LA16: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 7))&31U)))!=0)) goto LA20; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_122)); } LA20: ; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_111)); add_180482_2381377266(&result0, (*s0).loc.r); } goto LA10; LA12: ; { TY534811 LOC23; memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = result0; LOC23[1] = (*s0).loc.r; result0 = HEX25_180905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2); } LA10: ; return result0; } N_NIMCALL(void, initloc_534273_839829468)(Tloc294816* result0, Tlockind294808 k0, Ttype294840* typ0, Tstorageloc294812 s0) { (*result0).k = k0; (*result0).s = s0; unsureAsgnRef((void**) (&(*result0).t), typ0); unsureAsgnRef((void**) (&(*result0).r), NIM_NIL); (*result0).flags = 0; } N_NIMCALL(void, initlocexprsingleuse_541289_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* result0) { initloc_534273_839829468(result0, ((Tlockind294808) 0), (*e0).typ, ((Tstorageloc294812) 0)); (*result0).flags |= ((NU16)1)<<((((Tlocflag294810) 8))%(sizeof(NU16)*8)); expr_541248_839829468(p0, e0, result0); } static N_INLINE(Ropeobj180006**, s_531179_3723162438)(Tcproc531021* p0, Tcprocsection531011 s0) { Ropeobj180006** result0; result0 = (Ropeobj180006**)0; result0 = &(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].sections[(s0)- 0]; return result0; } N_NIMCALL(Ropeobj180006*, indentline_534656_839829468)(Tcproc531021* p0, Ropeobj180006* r0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = r0; { NI i_534680_839829468; NI HEX3Atmp_534683_839829468; NI res_534686_839829468; i_534680_839829468 = (NI)0; HEX3Atmp_534683_839829468 = (NI)0; HEX3Atmp_534683_839829468 = (NI)(((*p0).blocks ? 
(*p0).blocks->Sup.len : 0) - ((NI) 1)); res_534686_839829468 = ((NI) 0); { while (1) { if (!(res_534686_839829468 <= HEX3Atmp_534683_839829468)) goto LA3; i_534680_839829468 = res_534686_839829468; prepend_180893_2381377266(&result0, indent_534655_839829468); res_534686_839829468 += ((NI) 1); } LA3: ; } } return result0; } N_NIMCALL(void, linefmt_534714_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) { Ropeobj180006** LOC1; Ropeobj180006* LOC2; Ropeobj180006* LOC3; LOC1 = (Ropeobj180006**)0; LOC1 = s_531179_3723162438(p0, s0); LOC2 = (Ropeobj180006*)0; LOC2 = ropecg_534407_839829468((*p0).module, frmt0, args0, args0Len0); LOC3 = (Ropeobj180006*)0; LOC3 = indentline_534656_839829468(p0, LOC2); add_180482_2381377266(LOC1, LOC3); } N_NIMCALL(Ropeobj180006*, rdloc_540188_839829468)(Tloc294816 a0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = a0.r; { TY180507 LOC5; if (!((a0.flags &(1U<<((NU)(((Tlocflag294810) 0))&15U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = result0; result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC5, 1); } LA3: ; return result0; } N_NIMCALL(void, line_534690_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, Ropeobj180006* r0) { Ropeobj180006** LOC1; Ropeobj180006* LOC2; LOC1 = (Ropeobj180006**)0; LOC1 = s_531179_3723162438(p0, s0); LOC2 = (Ropeobj180006*)0; LOC2 = indentline_534656_839829468(p0, r0); add_180482_2381377266(LOC1, LOC2); } N_NIMCALL(void, linef_534700_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) { Ropeobj180006** LOC1; Ropeobj180006* LOC2; Ropeobj180006* LOC3; LOC1 = (Ropeobj180006**)0; LOC1 = s_531179_3723162438(p0, s0); LOC2 = (Ropeobj180006*)0; LOC2 = HEX25_180905_2381377266(frmt0, args0, args0Len0); LOC3 = (Ropeobj180006*)0; LOC3 = indentline_534656_839829468(p0, LOC2); add_180482_2381377266(LOC1, LOC3); } N_NIMCALL(void, 
gentypeinfoauxbase_537960_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0, Ropeobj180006* base0) { NI nimtypekind0; Ropeobj180006* size0; TY537235 LOC17; NI flags0; Ropeobj180006* LOC33; TY534811 LOC34; NimStringDesc* LOC35; nimtypekind0 = (NI)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isobjlackingtypefield_535513_839829468(typ0); if (!LOC3) goto LA4; nimtypekind0 = ((NI) 18); } goto LA1; LA4: ; { nimtypekind0 = ((NI) ((*typ0).kind)); } LA1: ; size0 = (Ropeobj180006*)0; { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0)) goto LA9; size0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_133)); } goto LA7; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC12) goto LA13; LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; size0 = gettypedesc_537671_839829468(m0, origtype0); } goto LA7; LA14: ; { size0 = gettypedesc_537671_839829468(m0, typ0); } LA7: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = name0; LOC17[1] = size0; LOC17[2] = rope_180401_2381377266(((NI64) (nimtypekind0))); LOC17[3] = base0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4); flags0 = ((NI) 0); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = containsgarbagecollectedref_322117_3876443242(typ0); if (!!(LOC20)) goto LA21; flags0 = (NI)(flags0 | ((NI) 1)); } LA21: ; { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = canformacycle_322123_3876443242(typ0); if (!!(LOC25)) goto LA26; flags0 = (NI)(flags0 | ((NI) 2)); } LA26: ; { TY534811 LOC32; if (!!((flags0 == ((NI) 0)))) goto LA30; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = name0; LOC32[1] = rope_180401_2381377266(((NI64) (flags0))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2); } LA30: ; LOC33 = (Ropeobj180006*)0; LOC33 = 
cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_129)); memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = name0; LOC35 = (NimStringDesc*)0; LOC35 = typetostring_322017_3876443242(typ0, ((Tprefereddesc322011) 0)); LOC34[1] = rope_180277_2381377266(LOC35); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2); } N_NIMCALL(Ropeobj180006*, getnimnode_537945_839829468)(Tcgen531027* m0) { Ropeobj180006* result0; TY534811 LOC1; result0 = (Ropeobj180006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = (*m0).typenodesname; LOC1[1] = rope_180401_2381377266(((NI64) ((*m0).typenodes))); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_138), LOC1, 2); (*m0).typenodes += ((NI) 1); return result0; } N_NIMCALL(void, gentupleinfo_538549_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0) { Ropeobj180006* LOC1; Ropeobj180006* expr0; NI length0; TY534811 LOC15; LOC1 = (Ropeobj180006*)0; LOC1 = rope_180277_2381377266(((NimStringDesc*) &T839829468_18)); gentypeinfoauxbase_537960_839829468(m0, typ0, typ0, name0, LOC1); expr0 = getnimnode_537945_839829468(m0); length0 = sonslen_297327_850551059(typ0); { Ropeobj180006* tmp0; TY534811 LOC6; TY537238 LOC12; if (!(((NI) 0) < length0)) goto LA4; tmp0 = gettempname_535596_839829468(m0); memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = tmp0; LOC6[1] = rope_180401_2381377266(((NI64) (length0))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2); { NI i_538571_839829468; NI HEX3Atmp_538590_839829468; NI res_538593_839829468; i_538571_839829468 = (NI)0; HEX3Atmp_538590_839829468 = (NI)0; HEX3Atmp_538590_839829468 = (NI)(length0 - ((NI) 1)); res_538593_839829468 = ((NI) 0); { while (1) { Ttype294840* a0; Ropeobj180006* tmp20; TY537238 LOC10; TY537235 LOC11; if (!(res_538593_839829468 <= HEX3Atmp_538590_839829468)) goto LA9; i_538571_839829468 = res_538593_839829468; a0 = 
(*typ0).sons->data[i_538571_839829468]; tmp20 = getnimnode_537945_839829468(m0); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = tmp0; LOC10[1] = rope_180401_2381377266(((NI64) (i_538571_839829468))); LOC10[2] = tmp20; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = tmp20; LOC11[1] = gettypedesc_537671_839829468(m0, typ0); LOC11[2] = rope_180401_2381377266(((NI64) (i_538571_839829468))); LOC11[3] = gentypeinfo_537941_839829468(m0, a0); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4); res_538593_839829468 += ((NI) 1); } LA9: ; } } memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = expr0; LOC12[1] = rope_180401_2381377266(((NI64) (length0))); LOC12[2] = tmp0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3); } goto LA2; LA4: ; { TY534811 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = expr0; LOC14[1] = rope_180401_2381377266(((NI64) (length0))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2); } LA2: ; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = name0; LOC15[1] = expr0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2); } N_NIMCALL(Ttype294840*, fakeclosuretype_539010_839829468)(Tsym294834* owner0) { Ttype294840* result0; Ttype294840* LOC1; Ttype294840* r0; Ttype294840* LOC2; result0 = (Ttype294840*)0; result0 = newtype_297107_850551059(((Ttypekind294244) 18), owner0); LOC1 = (Ttype294840*)0; LOC1 = newtype_297107_850551059(((Ttypekind294244) 26), owner0); rawaddson_298394_850551059(result0, LOC1); r0 = newtype_297107_850551059(((Ttypekind294244) 22), owner0); LOC2 = (Ttype294840*)0; LOC2 = newtype_297107_850551059(((Ttypekind294244) 18), owner0); rawaddson_298394_850551059(r0, LOC2); 
rawaddson_298394_850551059(result0, r0); return result0; } N_NIMCALL(void, gentypeinfoaux_538027_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0) { Ropeobj180006* base0; base0 = (Ropeobj180006*)0; { NIM_BOOL LOC3; NI LOC4; Ttype294840* x0; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = sonslen_297327_850551059(typ0); LOC3 = (((NI) 0) < LOC4); if (!(LOC3)) goto LA5; LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL)); LA5: ; if (!LOC3) goto LA6; x0 = (*typ0).sons->data[((NI) 0)]; { if (!((*typ0).kind == ((Ttypekind294244) 17))) goto LA10; x0 = skiptypes_298099_850551059(x0, IL64(211106247215360)); } LA10: ; base0 = gentypeinfo_537941_839829468(m0, x0); } goto LA1; LA6: ; { base0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_18)); } LA1: ; gentypeinfoauxbase_537960_839829468(m0, typ0, origtype0, name0, base0); } static N_INLINE(NIM_BOOL, iscomplexvaluetype_540317_839829468)(Ttype294840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC3; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((*t0).kind == ((Ttypekind294244) 16) || (*t0).kind == ((Ttypekind294244) 4) || (*t0).kind == ((Ttypekind294244) 19) || (*t0).kind == ((Ttypekind294244) 18) || (*t0).kind == ((Ttypekind294244) 17)); if (LOC1) goto LA2; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind294244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention294002) 8)); LA4: ; LOC1 = LOC3; LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, usestringh_534345_839829468)(Tcgen531027* m0) { { NIM_BOOL LOC5; if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag531025) 4))&7U)))!=0))) goto LA3; (*m0).flags |= ((NU8)1)<<((((Codegenflag531025) 4))%(sizeof(NU8)*8)); LOC5 = (NIM_BOOL)0; LOC5 = includestr_148249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151)); } LA3: ; } N_NIMCALL(Ropeobj180006*, addrloc_540204_839829468)(Tloc294816 a0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = a0.r; { NIM_BOOL LOC3; 
Tctypekind531007 LOC5; Ropeobj180006* LOC8; LOC3 = (NIM_BOOL)0; LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag294810) 0))&15U)))!=0)); if (!(LOC3)) goto LA4; LOC5 = (Tctypekind531007)0; LOC5 = maptype_535393_839829468(a0.t); LOC3 = !((LOC5 == ((Tctypekind531007) 17))); LA4: ; if (!LOC3) goto LA6; LOC8 = (Ropeobj180006*)0; LOC8 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_128), result0); result0 = HEX26_180447_2381377266(LOC8, ((NimStringDesc*) &T839829468_117)); } LA6: ; return result0; } N_NIMCALL(void, genobjectinit_540242_839829468)(Tcproc531021* p0, Tcprocsection531011 section0, Ttype294840* t0, Tloc294816 a0, NIM_BOOL takeaddr0) { Ttypefieldresult322145 LOC1; LOC1 = (Ttypefieldresult322145)0; LOC1 = analyseobjectwithtypefield_322149_3876443242(t0); switch (LOC1) { case ((Ttypefieldresult322145) 0): { } break; case ((Ttypefieldresult322145) 1): { Ropeobj180006* r0; Ttype294840* s0; TY534811 LOC19; r0 = rdloc_540188_839829468(a0); { TY180507 LOC8; if (!!(takeaddr0)) goto LA6; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = r0; r0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1); } LA6: ; s0 = skiptypes_298099_850551059(t0, IL64(211106232576256)); { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC11) goto LA12; LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA12: ; if (!!(LOC11)) goto LA13; { while (1) { NIM_BOOL LOC17; LOC17 = (NIM_BOOL)0; LOC17 = ((*s0).kind == ((Ttypekind294244) 17)); if (!(LOC17)) goto LA18; LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL)); LA18: ; if (!LOC17) goto LA16; add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); s0 = skiptypes_298099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360)); } LA16: ; } } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = r0; LOC19[1] = gentypeinfo_537941_839829468((*p0).module, t0); linefmt_534714_839829468(p0, section0, ((NimStringDesc*) 
&T839829468_154), LOC19, 2); } break; case ((Ttypefieldresult322145) 2): { Ropeobj180006* r0; TY534811 LOC26; { if (!takeaddr0) goto LA23; r0 = addrloc_540204_839829468(a0); } goto LA21; LA23: ; { r0 = rdloc_540188_839829468(a0); } LA21: ; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = r0; LOC26[1] = gentypeinfo_537941_839829468((*p0).module, t0); linefmt_534714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2); } break; } } N_NIMCALL(void, constructloc_540388_839829468)(Tcproc531021* p0, Tloc294816 loc0, NIM_BOOL istemp0) { Ttype294840* typ0; typ0 = skiptypes_298099_850551059(loc0.t, IL64(211106233624832)); { NIM_BOOL LOC3; TY534811 LOC6; LOC3 = (NIM_BOOL)0; LOC3 = iscomplexvaluetype_540317_839829468(typ0); if (!!(LOC3)) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rdloc_540188_839829468(loc0); LOC6[1] = gettypedesc_537671_839829468((*p0).module, typ0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2); } goto LA1; LA4: ; { { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = !(istemp0); if (LOC10) goto LA11; LOC10 = containsgarbagecollectedref_322117_3876443242(loc0.t); LA11: ; if (!LOC10) goto LA12; { NIM_BOOL LOC16; TY534811 LOC19; LOC16 = (NIM_BOOL)0; LOC16 = isimportedcpptype_535476_839829468(typ0); if (!!(LOC16)) goto LA17; usestringh_534345_839829468((*p0).module); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_540204_839829468(loc0); LOC19[1] = rdloc_540188_839829468(loc0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2); } LA17: ; } LA12: ; genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), loc0.t, loc0, NIM_TRUE); } LA1: ; } N_NIMCALL(void, gettemp_539032_839829468)(Tcproc531021* p0, Ttype294840* t0, Tloc294816* result0, NIM_BOOL needsinit0) { Ropeobj180006* LOC1; TY534811 LOC2; (*p0).labels += ((NI) 1); LOC1 = (Ropeobj180006*)0; LOC1 = rope_180401_2381377266(((NI64) ((*p0).labels))); 
unsureAsgnRef((void**) (&(*result0).r), HEX26_180452_2381377266(((NimStringDesc*) &T839829468_149), LOC1)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = gettypedesc_537671_839829468((*p0).module, t0); LOC2[1] = (*result0).r; linefmt_534714_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_54), LOC2, 2); (*result0).k = ((Tlockind294808) 1); unsureAsgnRef((void**) (&(*result0).t), t0); (*result0).s = ((Tstorageloc294812) 2); (*result0).flags = 0; constructloc_540388_839829468(p0, (*result0), !(needsinit0)); } static N_INLINE(Ropeobj180006*, parentobj_539257_839829468)(Ropeobj180006* accessor0, Tcgen531027* m0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { NIM_BOOL LOC3; TY180507 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = accessor0; result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_161), LOC7, 1); } goto LA1; LA5: ; { result0 = accessor0; } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, intliteral_541270_839829468)(NI64 i0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (IL64(-2147483648) < i0); if (!(LOC3)) goto LA4; LOC3 = (i0 <= IL64(2147483647)); LA4: ; if (!LOC3) goto LA5; result0 = rope_180401_2381377266(i0); } goto LA1; LA5: ; { TY535289 LOC10; if (!(i0 == IL64(-2147483648))) goto LA8; memset((void*)LOC10, 0, sizeof(LOC10)); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_166), LOC10, 0); } goto LA1; LA8: ; { TY180507 LOC14; if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_180401_2381377266(i0); result0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC14, 1); } goto LA1; LA12: ; { TY535289 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); 
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_168), LOC16, 0); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, int64literal_551430_839829468)(NI64 i0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { TY180507 LOC5; if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_180401_2381377266(i0); result0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC5, 1); } goto LA1; LA3: ; { TY535289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_168), LOC7, 0); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, uint64literal_551442_839829468)(NU64 i0) { Ropeobj180006* result0; NimStringDesc* LOC1; NimStringDesc* LOC2; result0 = (Ropeobj180006*)0; LOC1 = (NimStringDesc*)0; LOC2 = (NimStringDesc*)0; LOC2 = HEX24_8401_1689653243(i0); LOC1 = rawNewString(LOC2->Sup.len + 3); appendString(LOC1, LOC2); appendString(LOC1, ((NimStringDesc*) &T839829468_171)); result0 = rope_180277_2381377266(LOC1); return result0; } N_NIMCALL(Ropeobj180006*, getstrlit_551468_839829468)(Tcgen531027* m0, NimStringDesc* s0) { Ropeobj180006* result0; Ropeobj180006* LOC1; TY537238 LOC2; result0 = (Ropeobj180006*)0; LOC1 = (Ropeobj180006*)0; LOC1 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_79)); result0 = gettempname_535596_839829468(m0); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = result0; LOC2[1] = makecstring_193638_155036129(s0); LOC2[2] = rope_180401_2381377266(((NI64) ((s0 ? 
s0->Sup.len : 0)))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_177), LOC2, 3); return result0; } N_NIMCALL(Ropeobj180006*, genliteral_551476_839829468)(Tcproc531021* p0, Tnode294802* n0, Ttype294840* ty0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { if (!(ty0 == NIM_NIL)) goto LA3; internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165)); } LA3: ; switch ((*n0).kind) { case ((Tnodekind294020) 5) ... ((Tnodekind294020) 15): { Ttype294840* LOC6; LOC6 = (Ttype294840*)0; LOC6 = skiptypes_298099_850551059(ty0, IL64(211106242013440)); switch ((*LOC6).kind) { case ((Ttypekind294244) 2): case ((Ttypekind294244) 5): { result0 = intliteral_541270_839829468((*n0).kindU.S1.intval); } break; case ((Ttypekind294244) 1): { { TY535289 LOC13; if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11; memset((void*)LOC13, 0, sizeof(LOC13)); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0); } goto LA9; LA11: ; { TY535289 LOC15; memset((void*)LOC15, 0, sizeof(LOC15)); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0); } LA9: ; } break; case ((Ttypekind294244) 35): { result0 = int64literal_551430_839829468((*n0).kindU.S1.intval); } break; case ((Ttypekind294244) 44): { result0 = uint64literal_551442_839829468(((NU64) ((*n0).kindU.S1.intval))); } break; default: { TY534811 LOC19; Ttype294840* LOC20; memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (Ttype294840*)0; LOC20 = skiptypes_298099_850551059(ty0, IL64(211106242013440)); LOC19[0] = gettypedesc_537671_839829468((*p0).module, LOC20); LOC19[1] = intliteral_541270_839829468((*n0).kindU.S1.intval); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2); } break; } } break; case ((Tnodekind294020) 23): { Ttype294840* t0; t0 = skiptypes_298099_850551059(ty0, IL64(211106242013440)); { NIM_BOOL LOC24; NI id0; Ropeobj180006* LOC28; LOC24 = (NIM_BOOL)0; LOC24 = ((*t0).kind == 
((Ttypekind294244) 25)); if (!(LOC24)) goto LA25; LOC24 = ((*t0).callconv == ((Tcallingconvention294002) 8)); LA25: ; if (!LOC24) goto LA26; id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC28 = (Ropeobj180006*)0; LOC28 = rope_180401_2381377266(((NI64) (id0))); result0 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC28); { TY534811 LOC33; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = gettypedesc_537671_839829468((*p0).module, t0); LOC33[1] = result0; addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2); } LA31: ; } goto LA22; LA26: ; { result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_174)); } LA22: ; } break; case ((Tnodekind294020) 20) ... ((Tnodekind294020) 22): { { TY535289 LOC40; if (!(*n0).kindU.S3.strval == 0) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0); } goto LA36; LA38: ; { Ttype294840* LOC42; NI id0; LOC42 = (Ttype294840*)0; LOC42 = skiptypes_298099_850551059(ty0, IL64(211106242013440)); if (!((*LOC42).kind == ((Ttypekind294244) 28))) goto LA43; id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); { TY180507 LOC49; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = getstrlit_551468_839829468((*p0).module, (*n0).kindU.S3.strval); result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1); } goto LA45; LA47: ; { TY534811 LOC51; memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = (*(*p0).module).tmpbase; LOC51[1] = rope_180401_2381377266(((NI64) (id0))); result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2); } LA45: ; } goto 
LA36; LA43: ; { result0 = makecstring_193638_155036129((*n0).kindU.S3.strval); } LA36: ; } break; case ((Tnodekind294020) 16) ... ((Tnodekind294020) 18): { NimStringDesc* LOC54; LOC54 = (NimStringDesc*)0; LOC54 = tostrmaxprecision_300007_3471544153((*n0).kindU.S2.floatval); result0 = rope_180277_2381377266(LOC54); } break; default: { NimStringDesc* LOC56; LOC56 = (NimStringDesc*)0; LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI294020))->Sup.len + 12); appendString(LOC56, ((NimStringDesc*) &T839829468_179)); appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI294020))); appendChar(LOC56, 41); internalerror_198100_155036129((*n0).info, LOC56); result0 = NIM_NIL; } break; } return result0; } N_NIMCALL(Ropeobj180006*, genliteral_541273_839829468)(Tcproc531021* p0, Tnode294802* n0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = genliteral_551476_839829468(p0, n0, (*n0).typ); return result0; } N_NIMCALL(void, gencaserange_539028_839829468)(Tcproc531021* p0, Tnode294802* branch0) { NI length0; length0 = len_295081_850551059(branch0); { NI j_549676_839829468; NI HEX3Atmp_549717_839829468; NI res_549720_839829468; j_549676_839829468 = (NI)0; HEX3Atmp_549717_839829468 = (NI)0; HEX3Atmp_549717_839829468 = (NI)(length0 - ((NI) 2)); res_549720_839829468 = ((NI) 0); { while (1) { if (!(res_549720_839829468 <= HEX3Atmp_549717_839829468)) goto LA3; j_549676_839829468 = res_549720_839829468; { Tnode294802* LOC6; LOC6 = (Tnode294802*)0; LOC6 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468); if (!((*LOC6).kind == ((Tnodekind294020) 44))) goto LA7; { TY534811 LOC13; Tnode294802* LOC14; Tnode294802* LOC15; Tnode294802* LOC16; Tnode294802* LOC17; if (!((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 0))&7U)))!=0)) goto LA11; memset((void*)LOC13, 0, sizeof(LOC13)); LOC14 = (Tnode294802*)0; LOC14 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468); LOC15 = (Tnode294802*)0; LOC15 = 
HEX5BHEX5D_295238_850551059(LOC14, ((NI) 0)); LOC13[0] = genliteral_541273_839829468(p0, LOC15); LOC16 = (Tnode294802*)0; LOC16 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468); LOC17 = (Tnode294802*)0; LOC17 = HEX5BHEX5D_295238_850551059(LOC16, ((NI) 1)); LOC13[1] = genliteral_541273_839829468(p0, LOC17); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2); } goto LA9; LA11: ; { Tnode294802* v0; Tnode294802* LOC19; Tnode294802* LOC20; LOC19 = (Tnode294802*)0; LOC19 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468); LOC20 = (Tnode294802*)0; LOC20 = HEX5BHEX5D_295238_850551059(LOC19, ((NI) 0)); v0 = copynode_298528_850551059(LOC20); { while (1) { Tnode294802* LOC23; Tnode294802* LOC24; TY180507 LOC25; LOC23 = (Tnode294802*)0; LOC23 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468); LOC24 = (Tnode294802*)0; LOC24 = HEX5BHEX5D_295238_850551059(LOC23, ((NI) 1)); if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = genliteral_541273_839829468(p0, v0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1); (*v0).kindU.S1.intval += ((NI) 1); } LA22: ; } } LA9: ; } goto LA4; LA7: ; { TY180507 LOC27; Tnode294802* LOC28; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Tnode294802*)0; LOC28 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468); LOC27[0] = genliteral_541273_839829468(p0, LOC28); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1); } LA4: ; res_549720_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, gentraverseproc_539039_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Tnode294802* n0) { { { if (!(n0 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; switch ((*n0).kind) { case ((Tnodekind294020) 138): { { NI i_539068_839829468; NI HEX3Atmp_539239_839829468; NI LOC7; NI res_539242_839829468; 
i_539068_839829468 = (NI)0; HEX3Atmp_539239_839829468 = (NI)0; LOC7 = (NI)0; LOC7 = sonslen_297351_850551059(n0); HEX3Atmp_539239_839829468 = (NI)(LOC7 - ((NI) 1)); res_539242_839829468 = ((NI) 0); { while (1) { if (!(res_539242_839829468 <= HEX3Atmp_539239_839829468)) goto LA9; i_539068_839829468 = res_539242_839829468; gentraverseproc_539039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_539068_839829468]); res_539242_839829468 += ((NI) 1); } LA9: ; } } } break; case ((Tnodekind294020) 139): { Tcproc531021* p0; Tsym294834* disc0; TY534811 LOC15; TY535289 LOC28; { if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)))) goto LA13; internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162)); } LA13: ; p0 = (*c0).p; disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = accessor0; LOC15[1] = (*disc0).loc.r; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2); { NI i_539098_839829468; NI HEX3Atmp_539249_839829468; NI LOC17; NI res_539252_839829468; i_539098_839829468 = (NI)0; HEX3Atmp_539249_839829468 = (NI)0; LOC17 = (NI)0; LOC17 = sonslen_297351_850551059(n0); HEX3Atmp_539249_839829468 = (NI)(LOC17 - ((NI) 1)); res_539252_839829468 = ((NI) 1); { while (1) { Tnode294802* branch0; Tnode294802* LOC26; TY535289 LOC27; if (!(res_539252_839829468 <= HEX3Atmp_539249_839829468)) goto LA19; i_539098_839829468 = res_539252_839829468; branch0 = (*n0).kindU.S6.sons->data[i_539098_839829468]; { if (!((*branch0).kind == ((Tnodekind294020) 85))) goto LA22; gencaserange_539028_839829468((*c0).p, branch0); } goto LA20; LA22: ; { TY535289 LOC25; memset((void*)LOC25, 0, sizeof(LOC25)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0); } LA20: ; LOC26 = (Tnode294802*)0; LOC26 = lastson_297364_850551059(branch0); gentraverseproc_539039_839829468(c0, accessor0, LOC26); 
memset((void*)LOC27, 0, sizeof(LOC27)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0); res_539252_839829468 += ((NI) 1); } LA19: ; } } memset((void*)LOC28, 0, sizeof(LOC28)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0); } break; case ((Tnodekind294020) 3): { Tsym294834* field0; TY534811 LOC34; Ropeobj180006* LOC35; field0 = (*n0).kindU.S4.sym; { if (!((*field0).loc.t == NIM_NIL)) goto LA32; internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184)); } LA32: ; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = accessor0; LOC34[1] = (*field0).loc.r; LOC35 = (Ropeobj180006*)0; LOC35 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2); gentraverseproc_539022_839829468(c0, LOC35, (*field0).loc.t); } break; default: { internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184)); } break; } }BeforeRet: ; } N_NIMCALL(void, linecg_534707_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) { Ropeobj180006** LOC1; Ropeobj180006* LOC2; Ropeobj180006* LOC3; LOC1 = (Ropeobj180006**)0; LOC1 = s_531179_3723162438(p0, s0); LOC2 = (Ropeobj180006*)0; LOC2 = ropecg_534407_839829468((*p0).module, frmt0, args0, args0Len0); LOC3 = (Ropeobj180006*)0; LOC3 = indentline_534656_839829468(p0, LOC2); add_180482_2381377266(LOC1, LOC3); } N_NIMCALL(void, gentraverseproc_539022_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Ttype294840* typ_539027_839829468) { Ttype294840* typ_539302_839829468; Tcproc531021* p0; { { if (!(typ_539027_839829468 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; typ_539302_839829468 = getuniquetype_530640_2036603609(typ_539027_839829468); p0 = (*c0).p; switch ((*typ_539302_839829468).kind) { case ((Ttypekind294244) 11): case ((Ttypekind294244) 10): case ((Ttypekind294244) 8): { Ttype294840* LOC6; LOC6 = (Ttype294840*)0; LOC6 = 
lastson_297377_850551059(typ_539302_839829468); gentraverseproc_539022_839829468(c0, accessor0, LOC6); } break; case ((Ttypekind294244) 4): case ((Ttypekind294244) 16): { NI64 arraysize0; Tloc294816 i0; Ttype294840* LOC8; TY534811 LOC9; TY534811 LOC10; Ropeobj180006* LOC11; TY535289 LOC12; arraysize0 = lengthord_322007_3876443242((*typ_539302_839829468).sons->data[((NI) 0)]); memset((void*)(&i0), 0, sizeof(i0)); LOC8 = (Ttype294840*)0; LOC8 = getsystype_340150_3937434831(((Ttypekind294244) 31)); gettemp_539032_839829468(p0, LOC8, (&i0), NIM_FALSE); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = i0.r; LOC9[1] = rope_180401_2381377266(arraysize0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = accessor0; LOC10[1] = i0.r; LOC11 = (Ropeobj180006*)0; LOC11 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2); gentraverseproc_539022_839829468(c0, LOC11, (*typ_539302_839829468).sons->data[((NI) 1)]); memset((void*)LOC12, 0, sizeof(LOC12)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0); } break; case ((Ttypekind294244) 17): { { NI i_539325_839829468; NI HEX3Atmp_539384_839829468; NI LOC15; NI res_539387_839829468; i_539325_839829468 = (NI)0; HEX3Atmp_539384_839829468 = (NI)0; LOC15 = (NI)0; LOC15 = sonslen_297327_850551059(typ_539302_839829468); HEX3Atmp_539384_839829468 = (NI)(LOC15 - ((NI) 1)); res_539387_839829468 = ((NI) 0); { while (1) { Ttype294840* x0; Ropeobj180006* LOC22; if (!(res_539387_839829468 <= HEX3Atmp_539384_839829468)) goto LA17; i_539325_839829468 = res_539387_839829468; x0 = (*typ_539302_839829468).sons->data[i_539325_839829468]; { if (!!((x0 == NIM_NIL))) goto LA20; x0 = skiptypes_298099_850551059(x0, IL64(211106247215360)); } LA20: ; LOC22 = (Ropeobj180006*)0; LOC22 = parentobj_539257_839829468(accessor0, (*(*c0).p).module); gentraverseproc_539022_839829468(c0, 
LOC22, x0); res_539387_839829468 += ((NI) 1); } LA17: ; } } { if (!!(((*typ_539302_839829468).n == NIM_NIL))) goto LA25; gentraverseproc_539039_839829468(c0, accessor0, (*typ_539302_839829468).n); } LA25: ; } break; case ((Ttypekind294244) 18): { Ttype294840* typ0; typ0 = getuniquetype_530640_2036603609(typ_539302_839829468); { NI i_539363_839829468; NI HEX3Atmp_539392_839829468; NI LOC29; NI res_539395_839829468; i_539363_839829468 = (NI)0; HEX3Atmp_539392_839829468 = (NI)0; LOC29 = (NI)0; LOC29 = sonslen_297327_850551059(typ0); HEX3Atmp_539392_839829468 = (NI)(LOC29 - ((NI) 1)); res_539395_839829468 = ((NI) 0); { while (1) { TY534811 LOC32; Ropeobj180006* LOC33; if (!(res_539395_839829468 <= HEX3Atmp_539392_839829468)) goto LA31; i_539363_839829468 = res_539395_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = accessor0; LOC32[1] = rope_180401_2381377266(((NI64) (i_539363_839829468))); LOC33 = (Ropeobj180006*)0; LOC33 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2); gentraverseproc_539022_839829468(c0, LOC33, (*typ0).sons->data[i_539363_839829468]); res_539395_839829468 += ((NI) 1); } LA31: ; } } } break; case ((Ttypekind294244) 22): case ((Ttypekind294244) 28): case ((Ttypekind294244) 24): { TY180507 LOC35; memset((void*)LOC35, 0, sizeof(LOC35)); LOC35[0] = accessor0; linecg_534707_839829468(p0, ((Tcprocsection531011) 2), (*c0).visitorfrmt, LOC35, 1); } break; case ((Ttypekind294244) 25): { { TY180507 LOC41; TY180507 LOC42; if (!((*typ_539302_839829468).callconv == ((Tcallingconvention294002) 8))) goto LA39; memset((void*)LOC41, 0, sizeof(LOC41)); memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = accessor0; LOC41[0] = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), (*c0).visitorfrmt, LOC41, 1); } LA39: ; } break; default: { } break; } }BeforeRet: ; } N_NIMCALL(void, gentraverseprocseq_539399_839829468)(Ttraversalclosure539019* c0, 
Ropeobj180006* accessor0, Ttype294840* typ0) { Tcproc531021* p0; Tloc294816 i0; Ttype294840* LOC1; TY537238 LOC2; NimStringDesc* LOC3; TY534811 LOC11; Ropeobj180006* LOC12; TY535289 LOC13; p0 = (*c0).p; memset((void*)(&i0), 0, sizeof(i0)); LOC1 = (Ttype294840*)0; LOC1 = getsystype_340150_3937434831(((Ttypekind294244) 31)); gettemp_539032_839829468(p0, LOC1, (&i0), NIM_FALSE); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = i0.r; LOC2[1] = accessor0; LOC3 = (NimStringDesc*)0; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC6) goto LA7; LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA7: ; if (!LOC6) goto LA8; LOC3 = copyString(((NimStringDesc*) &T839829468_157)); } goto LA4; LA8: ; { LOC3 = copyString(((NimStringDesc*) &T839829468_158)); } LA4: ; LOC2[2] = rope_180277_2381377266(LOC3); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = accessor0; LOC11[1] = i0.r; LOC12 = (Ropeobj180006*)0; LOC12 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2); gentraverseproc_539022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]); memset((void*)LOC13, 0, sizeof(LOC13)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0); } N_NIMCALL(Ropeobj180006*, gentraverseproc_539632_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttypeinforeason539016 reason0) { Ropeobj180006* result0; Ttraversalclosure539019 c0; Tcproc531021* p0; Ropeobj180006* header0; TY180507 LOC3; Ropeobj180006* t0; TY180507 LOC4; TY180507 LOC5; Ropeobj180006* generatedproc0; TY537235 LOC20; Ropeobj180006** LOC21; Ropeobj180006** LOC22; Ropeobj180006** LOC23; TY180507 LOC24; result0 = (Ropeobj180006*)0; memset((void*)(&c0), 0, sizeof(c0)); p0 = newproc_531206_3723162438(NIM_NIL, m0); result0 = gettempname_535596_839829468(m0); switch (reason0) { case 
((Ttypeinforeason539016) 0): { c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145)); } break; default: { } break; } memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = result0; header0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1); t0 = gettypedesc_537671_839829468(m0, typ0); memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = t0; linef_534700_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = t0; linef_534700_839829468(p0, ((Tcprocsection531011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1); c0.p = p0; { Ropeobj180006* LOC10; if (!((*typ0).kind == ((Ttypekind294244) 24))) goto LA8; LOC10 = (Ropeobj180006*)0; LOC10 = rope_180277_2381377266(((NimStringDesc*) &T839829468_188)); gentraverseprocseq_539399_839829468((&c0), LOC10, typ0); } goto LA6; LA8: ; { { Ttype294840* LOC14; Ropeobj180006* LOC17; LOC14 = (Ttype294840*)0; LOC14 = skiptypes_298099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256)); if (!((*LOC14).kind == ((Ttypekind294244) 4) || (*LOC14).kind == ((Ttypekind294244) 16))) goto LA15; LOC17 = (Ropeobj180006*)0; LOC17 = rope_180277_2381377266(((NimStringDesc*) &T839829468_188)); gentraverseproc_539022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]); } goto LA12; LA15: ; { Ropeobj180006* LOC19; LOC19 = (Ropeobj180006*)0; LOC19 = rope_180277_2381377266(((NimStringDesc*) &T839829468_189)); gentraverseproc_539022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]); } LA12: ; } LA6: ; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = header0; LOC21 = (Ropeobj180006**)0; LOC21 = s_531179_3723162438(p0, ((Tcprocsection531011) 0)); LOC20[1] = (*LOC21); LOC22 = (Ropeobj180006**)0; LOC22 = s_531179_3723162438(p0, ((Tcprocsection531011) 1)); LOC20[2] = (*LOC22); LOC23 = (Ropeobj180006**)0; LOC23 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); LOC20[3] = (*LOC23); generatedproc0 = HEX25_180905_2381377266(((NimStringDesc*) 
&T839829468_190), LOC20, 4); memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = header0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 10))- 0], generatedproc0); return result0; } N_NIMCALL(void, genarrayinfo_539005_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0) { Ropeobj180006* LOC1; LOC1 = (Ropeobj180006*)0; LOC1 = gentypeinfo_537941_839829468(m0, (*typ0).sons->data[((NI) 1)]); gentypeinfoauxbase_537960_839829468(m0, typ0, typ0, name0, LOC1); } N_NIMCALL(void, gensetinfo_538867_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0) { Ropeobj180006* tmp0; TY537238 LOC1; NI64 LOC2; gentypeinfoaux_538027_839829468(m0, typ0, typ0, name0); tmp0 = getnimnode_537945_839829468(m0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = tmp0; LOC2 = (NI64)0; LOC2 = firstord_322001_3876443242(typ0); LOC1[1] = rope_180401_2381377266(LOC2); LOC1[2] = name0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_193), LOC1, 3); } N_NIMCALL(void, genenuminfo_538597_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0) { Ropeobj180006* nodeptrs0; NI length0; TY534811 LOC1; Ropeobj180006* enumnames0; Ropeobj180006* specialcases0; NI firstnimnode0; NIM_BOOL hasholes0; Ropeobj180006* enumarray0; Ropeobj180006* counter0; TY180507 LOC24; TY537238 LOC25; TY538847 LOC26; TY537235 LOC27; gentypeinfoaux_538027_839829468(m0, typ0, typ0, name0); nodeptrs0 = gettempname_535596_839829468(m0); length0 = sonslen_297351_850551059((*typ0).n); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = nodeptrs0; LOC1[1] = rope_180401_2381377266(((NI64) (length0))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2); enumnames0 = (Ropeobj180006*)0; specialcases0 = (Ropeobj180006*)0; firstnimnode0 = (*m0).typenodes; hasholes0 = NIM_FALSE; { NI 
i_538622_839829468; NI HEX3Atmp_538860_839829468; NI res_538863_839829468; i_538622_839829468 = (NI)0; HEX3Atmp_538860_839829468 = (NI)0; HEX3Atmp_538860_839829468 = (NI)(length0 - ((NI) 1)); res_538863_839829468 = ((NI) 0); { while (1) { Tsym294834* field0; Ropeobj180006* elemnode0; if (!(res_538863_839829468 <= HEX3Atmp_538860_839829468)) goto LA4; i_538622_839829468 = res_538863_839829468; field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_538622_839829468]).kindU.S4.sym; elemnode0 = getnimnode_537945_839829468(m0); { Ropeobj180006* LOC9; if (!((*field0).ast == NIM_NIL)) goto LA7; LOC9 = (Ropeobj180006*)0; LOC9 = makecstring_193638_155036129((*(*field0).name).s); add_180482_2381377266(&enumnames0, LOC9); } goto LA5; LA7: ; { Ropeobj180006* LOC11; LOC11 = (Ropeobj180006*)0; LOC11 = makecstring_193638_155036129((*(*field0).ast).kindU.S3.strval); add_180482_2381377266(&enumnames0, LOC11); } LA5: ; { NimStringDesc* LOC16; if (!(i_538622_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14; LOC16 = (NimStringDesc*)0; LOC16 = rawNewString(tnl_178644_4151366050->Sup.len + 2); appendString(LOC16, ((NimStringDesc*) &T839829468_110)); appendString(LOC16, tnl_178644_4151366050); add_180487_2381377266(&enumnames0, LOC16); } LA14: ; { NIM_BOOL LOC19; TY534811 LOC23; LOC19 = (NIM_BOOL)0; LOC19 = !(((*field0).position == i_538622_839829468)); if (LOC19) goto LA20; LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 5))&31U)))!=0); LA20: ; if (!LOC19) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = elemnode0; LOC23[1] = rope_180401_2381377266(((NI64) ((*field0).position))); addf_181205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2); hasholes0 = NIM_TRUE; } LA21: ; res_538863_839829468 += ((NI) 1); } LA4: ; } } enumarray0 = gettempname_535596_839829468(m0); counter0 = gettempname_535596_839829468(m0); memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = counter0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) 
&T839829468_195), LOC24, 1); memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = enumarray0; LOC25[1] = rope_180401_2381377266(((NI64) (length0))); LOC25[2] = enumnames0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3); memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = counter0; LOC26[1] = rope_180401_2381377266(((NI64) (length0))); LOC26[2] = (*m0).typenodesname; LOC26[3] = rope_180401_2381377266(((NI64) (firstnimnode0))); LOC26[4] = enumarray0; LOC26[5] = nodeptrs0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], specialcases0); memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = getnimnode_537945_839829468(m0); LOC27[1] = rope_180401_2381377266(((NI64) (length0))); LOC27[2] = nodeptrs0; LOC27[3] = name0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4); { TY180507 LOC32; if (!hasholes0) goto LA30; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = name0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1); } LA30: ; } N_NIMCALL(Ropeobj180006*, discriminatortablename_538057_839829468)(Tcgen531027* m0, Ttype294840* objtype_538060_839829468, Tsym294834* d0) { Ropeobj180006* result0; Ttype294840* objtype0; TY534811 LOC8; NimStringDesc* LOC9; result0 = (Ropeobj180006*)0; objtype0 = objtype_538060_839829468; { while (1) { Tsym294834* LOC3; LOC3 = (Tsym294834*)0; LOC3 = lookupinrecord_301119_2984716966((*objtype0).n, (*d0).name); if (!(LOC3 == NIM_NIL)) goto LA2; objtype0 = (*objtype0).sons->data[((NI) 0)]; } LA2: ; } { if (!((*objtype0).sym == NIM_NIL)) goto LA6; internalerror_198100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200)); } LA6: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rope_180401_2381377266(((NI64) ((*objtype0).Sup.id))); LOC9 = 
(NimStringDesc*)0; LOC9 = mangle_530847_2036603609((*(*d0).name).s); LOC8[1] = rope_180277_2381377266(LOC9); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_201), LOC8, 2); return result0; } N_NIMCALL(void, genobjectfields_538104_839829468)(Tcgen531027* m0, Ttype294840* typ0, Tnode294802* n0, Ropeobj180006* expr0) { switch ((*n0).kind) { case ((Tnodekind294020) 138): { NI L0; L0 = sonslen_297351_850551059(n0); { if (!(L0 == ((NI) 1))) goto LA4; genobjectfields_538104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0); } goto LA2; LA4: ; { Ropeobj180006* tmp0; TY534811 LOC9; TY537238 LOC14; if (!(((NI) 0) < L0)) goto LA7; tmp0 = gettempname_535596_839829468(m0); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = tmp0; LOC9[1] = rope_180401_2381377266(((NI64) (L0))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2); { NI i_538127_839829468; NI HEX3Atmp_538482_839829468; NI res_538485_839829468; i_538127_839829468 = (NI)0; HEX3Atmp_538482_839829468 = (NI)0; HEX3Atmp_538482_839829468 = (NI)(L0 - ((NI) 1)); res_538485_839829468 = ((NI) 0); { while (1) { Ropeobj180006* tmp20; TY537238 LOC13; if (!(res_538485_839829468 <= HEX3Atmp_538482_839829468)) goto LA12; i_538127_839829468 = res_538485_839829468; tmp20 = getnimnode_537945_839829468(m0); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = tmp0; LOC13[1] = rope_180401_2381377266(((NI64) (i_538127_839829468))); LOC13[2] = tmp20; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3); genobjectfields_538104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_538127_839829468], tmp20); res_538485_839829468 += ((NI) 1); } LA12: ; } } memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = expr0; LOC14[1] = rope_180401_2381377266(((NI64) (L0))); LOC14[2] = tmp0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3); } goto LA2; LA7: 
; { TY534811 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = expr0; LOC16[1] = rope_180401_2381377266(((NI64) (L0))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2); } LA2: ; } break; case ((Tnodekind294020) 139): { Tsym294834* field0; Ropeobj180006* tmp0; NI64 L0; TY538401 LOC18; TY534811 LOC19; field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; tmp0 = discriminatortablename_538057_839829468(m0, typ0, field0); L0 = lengthord_322007_3876443242((*field0).typ); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = expr0; LOC18[1] = gettypedesc_537671_839829468(m0, typ0); LOC18[2] = (*field0).loc.r; LOC18[3] = gentypeinfo_537941_839829468(m0, (*field0).typ); LOC18[4] = makecstring_193638_155036129((*(*field0).name).s); LOC18[5] = tmp0; LOC18[6] = rope_180401_2381377266(L0); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = tmp0; LOC19[1] = rope_180401_2381377266((NI64)(L0 + IL64(1))); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2); { NI i_538421_839829468; NI HEX3Atmp_538499_839829468; NI LOC21; NI res_538502_839829468; i_538421_839829468 = (NI)0; HEX3Atmp_538499_839829468 = (NI)0; LOC21 = (NI)0; LOC21 = sonslen_297351_850551059(n0); HEX3Atmp_538499_839829468 = (NI)(LOC21 - ((NI) 1)); res_538502_839829468 = ((NI) 1); { while (1) { Tnode294802* b0; Ropeobj180006* tmp20; Tnode294802* LOC24; if (!(res_538502_839829468 <= HEX3Atmp_538499_839829468)) goto LA23; i_538421_839829468 = res_538502_839829468; b0 = (*n0).kindU.S6.sons->data[i_538421_839829468]; tmp20 = getnimnode_537945_839829468(m0); LOC24 = (Tnode294802*)0; LOC24 = lastson_297364_850551059(b0); genobjectfields_538104_839829468(m0, typ0, LOC24, tmp20); switch ((*b0).kind) { case ((Tnodekind294020) 85): { { NI LOC28; LOC28 = (NI)0; LOC28 = 
sonslen_297351_850551059(b0); if (!(LOC28 < ((NI) 2))) goto LA29; internalerror_198100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204)); } LA29: ; { NI j_538436_839829468; NI HEX3Atmp_538492_839829468; NI LOC32; NI res_538495_839829468; j_538436_839829468 = (NI)0; HEX3Atmp_538492_839829468 = (NI)0; LOC32 = (NI)0; LOC32 = sonslen_297351_850551059(b0); HEX3Atmp_538492_839829468 = (NI)(LOC32 - ((NI) 2)); res_538495_839829468 = ((NI) 0); { while (1) { if (!(res_538495_839829468 <= HEX3Atmp_538492_839829468)) goto LA34; j_538436_839829468 = res_538495_839829468; { NI x0; NI64 LOC39; NI y0; NI64 LOC40; if (!((*(*b0).kindU.S6.sons->data[j_538436_839829468]).kind == ((Tnodekind294020) 44))) goto LA37; LOC39 = (NI64)0; LOC39 = getordvalue_322129_3876443242((*(*b0).kindU.S6.sons->data[j_538436_839829468]).kindU.S6.sons->data[((NI) 0)]); x0 = ((NI) (LOC39)); LOC40 = (NI64)0; LOC40 = getordvalue_322129_3876443242((*(*b0).kindU.S6.sons->data[j_538436_839829468]).kindU.S6.sons->data[((NI) 1)]); y0 = ((NI) (LOC40)); { while (1) { TY537238 LOC43; if (!(x0 <= y0)) goto LA42; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = tmp0; LOC43[1] = rope_180401_2381377266(((NI64) (x0))); LOC43[2] = tmp20; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3); x0 += ((NI) 1); } LA42: ; } } goto LA35; LA37: ; { TY537238 LOC45; NI64 LOC46; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = tmp0; LOC46 = (NI64)0; LOC46 = getordvalue_322129_3876443242((*b0).kindU.S6.sons->data[j_538436_839829468]); LOC45[1] = rope_180401_2381377266(LOC46); LOC45[2] = tmp20; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3); } LA35: ; res_538495_839829468 += ((NI) 1); } LA34: ; } } } break; case ((Tnodekind294020) 88): { TY537238 LOC48; memset((void*)LOC48, 0, sizeof(LOC48)); LOC48[0] = tmp0; LOC48[1] = rope_180401_2381377266(L0); LOC48[2] = tmp20; 
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3); } break; default: { internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205)); } break; } res_538502_839829468 += ((NI) 1); } LA23: ; } } } break; case ((Tnodekind294020) 3): { Tsym294834* field0; field0 = (*n0).kindU.S4.sym; { TY538475 LOC55; if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = expr0; LOC55[1] = gettypedesc_537671_839829468(m0, typ0); LOC55[2] = (*field0).loc.r; LOC55[3] = gentypeinfo_537941_839829468(m0, (*field0).typ); LOC55[4] = makecstring_193638_155036129((*(*field0).name).s); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5); } LA53: ; } break; default: { internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207)); } break; } } N_NIMCALL(void, genobjectinfo_538506_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0) { Ropeobj180006* tmp0; TY534811 LOC12; Ttype294840* t0; { if (!((*typ0).kind == ((Ttypekind294244) 17))) goto LA3; gentypeinfoaux_538027_839829468(m0, typ0, origtype0, name0); } goto LA1; LA3: ; { Ropeobj180006* LOC6; LOC6 = (Ropeobj180006*)0; LOC6 = rope_180277_2381377266(((NimStringDesc*) &T839829468_18)); gentypeinfoauxbase_537960_839829468(m0, typ0, origtype0, name0, LOC6); } LA1: ; tmp0 = getnimnode_537945_839829468(m0); { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = isimportedcpptype_535476_839829468(typ0); if (!!(LOC9)) goto LA10; genobjectfields_538104_839829468(m0, typ0, (*typ0).n, tmp0); } LA10: ; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = name0; LOC12[1] = tmp0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC12, 2); t0 = (*typ0).sons->data[((NI) 0)]; { while (1) { if (!!((t0 == NIM_NIL))) goto LA14; t0 = skiptypes_298099_850551059(t0, 
IL64(211106247215360)); (*t0).flags |= ((NU32)1)<<((((Ttypeflag294431) 5))%(sizeof(NU32)*8)); t0 = (*t0).sons->data[((NI) 0)]; } LA14: ; } } N_NIMCALL(void, gendeepcopyproc_540066_839829468)(Tcgen531027* m0, Tsym294834* s0, Ropeobj180006* result0) { TY534811 LOC1; genproc_534951_839829468(m0, s0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = result0; LOC1[1] = (*s0).loc.r; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_208), LOC1, 2); } N_NIMCALL(Ropeobj180006*, gentypeinfo_537941_839829468)(Tcgen531027* m0, Ttype294840* t_537944_839829468) { Ropeobj180006* result0; Ttype294840* origtype0; Ttype294840* t0; TY180507 LOC1; Tsym294834* owner0; Ttype294840* LOC12; Ropeobj180006* LOC66; Ropeobj180006* LOC67; Ropeobj180006* LOC68; { result0 = (Ropeobj180006*)0; origtype0 = t_537944_839829468; t0 = getuniquetype_530640_2036603609(t_537944_839829468); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rope_180401_2381377266(((NI64) ((*t0).Sup.id))); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1); { NIM_BOOL LOC4; Ropeobj180006* LOC7; Ropeobj180006* LOC8; Ropeobj180006* LOC9; LOC4 = (NIM_BOOL)0; LOC4 = containsorincl_270862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id); if (!LOC4) goto LA5; LOC7 = (Ropeobj180006*)0; LOC7 = rope_180277_2381377266(((NimStringDesc*) &T839829468_128)); LOC8 = (Ropeobj180006*)0; LOC8 = HEX26_180418_2381377266(LOC7, result0); LOC9 = (Ropeobj180006*)0; LOC9 = rope_180277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_180418_2381377266(LOC8, LOC9); goto BeforeRet; } LA5: ; { while (1) { if (!((*t0).kind == ((Ttypekind294244) 13))) goto LA11; t0 = lastson_297377_850551059(t0); } LA11: ; } LOC12 = (Ttype294840*)0; LOC12 = skiptypes_298099_850551059(t0, IL64(211106247256320)); owner0 = getmodule_301123_2984716966((*LOC12).owner); { Tcgen531027* LOC17; Ropeobj180006* LOC18; Ropeobj180006* LOC19; Ropeobj180006* LOC20; TY534811 LOC21; NimStringDesc* 
LOC22; Ropeobj180006* LOC23; Ropeobj180006* LOC24; Ropeobj180006* LOC25; if (!!((owner0 == (*m0).module))) goto LA15; LOC17 = (Tcgen531027*)0; LOC17 = bmod_531201_3723162438(owner0); LOC18 = (Ropeobj180006*)0; LOC18 = gentypeinfo_537941_839829468(LOC17, t0); LOC19 = (Ropeobj180006*)0; LOC19 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_129)); LOC20 = (Ropeobj180006*)0; LOC20 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_130)); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = result0; LOC22 = (NimStringDesc*)0; LOC22 = typetostring_322017_3876443242(t0, ((Tprefereddesc322011) 0)); LOC21[1] = rope_180277_2381377266(LOC22); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2); LOC23 = (Ropeobj180006*)0; LOC23 = rope_180277_2381377266(((NimStringDesc*) &T839829468_128)); LOC24 = (Ropeobj180006*)0; LOC24 = HEX26_180418_2381377266(LOC23, result0); LOC25 = (Ropeobj180006*)0; LOC25 = rope_180277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_180418_2381377266(LOC24, LOC25); goto BeforeRet; } LA15: ; switch ((*t0).kind) { case ((Ttypekind294244) 3): case ((Ttypekind294244) 62): { result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_132)); } break; case ((Ttypekind294244) 26): case ((Ttypekind294244) 1): case ((Ttypekind294244) 2): case ((Ttypekind294244) 29): case ((Ttypekind294244) 28): case ((Ttypekind294244) 31) ... 
((Ttypekind294244) 44): case ((Ttypekind294244) 23): { Ropeobj180006* LOC28; LOC28 = (Ropeobj180006*)0; LOC28 = rope_180277_2381377266(((NimStringDesc*) &T839829468_132)); gentypeinfoauxbase_537960_839829468(m0, t0, t0, result0, LOC28); } break; case ((Ttypekind294244) 59): { { Ttype294840* LOC34; if (!!(((*t0).n == NIM_NIL))) goto LA32; LOC34 = (Ttype294840*)0; LOC34 = lastson_297377_850551059(t0); result0 = gentypeinfo_537941_839829468(m0, LOC34); } goto LA30; LA32: ; { NimStringDesc* LOC36; LOC36 = (NimStringDesc*)0; LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI294244))->Sup.len + 13); appendString(LOC36, ((NimStringDesc*) &T839829468_137)); appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI294244))); appendChar(LOC36, 41); internalerror_198113_155036129(LOC36); } LA30: ; } break; case ((Ttypekind294244) 25): { { Ropeobj180006* LOC42; if (!!(((*t0).callconv == ((Tcallingconvention294002) 8)))) goto LA40; LOC42 = (Ropeobj180006*)0; LOC42 = rope_180277_2381377266(((NimStringDesc*) &T839829468_132)); gentypeinfoauxbase_537960_839829468(m0, t0, t0, result0, LOC42); } goto LA38; LA40: ; { Ttype294840* LOC44; LOC44 = (Ttype294840*)0; LOC44 = fakeclosuretype_539010_839829468((*t0).owner); gentupleinfo_538549_839829468(m0, LOC44, result0); } LA38: ; } break; case ((Ttypekind294244) 24): case ((Ttypekind294244) 22): { gentypeinfoaux_538027_839829468(m0, t0, t0, result0); { Ropeobj180006* markerproc0; TY534811 LOC50; if (!(((Tgcmode171080) 4) <= gselectedgc_171133_2607990831)) goto LA48; markerproc0 = gentraverseproc_539632_839829468(m0, t0, ((Ttypeinforeason539016) 0)); memset((void*)LOC50, 0, sizeof(LOC50)); LOC50[0] = result0; LOC50[1] = markerproc0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2); } LA48: ; } break; case ((Ttypekind294244) 21): case ((Ttypekind294244) 20): { gentypeinfoaux_538027_839829468(m0, t0, t0, result0); } break; case ((Ttypekind294244) 4): case ((Ttypekind294244) 16): { 
genarrayinfo_539005_839829468(m0, t0, result0); } break; case ((Ttypekind294244) 19): { gensetinfo_538867_839829468(m0, t0, result0); } break; case ((Ttypekind294244) 14): { genenuminfo_538597_839829468(m0, t0, result0); } break; case ((Ttypekind294244) 17): { genobjectinfo_538506_839829468(m0, t0, origtype0, result0); } break; case ((Ttypekind294244) 18): { gentupleinfo_538549_839829468(m0, t0, result0); } break; default: { NimStringDesc* LOC58; LOC58 = (NimStringDesc*)0; LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI294244))->Sup.len + 13); appendString(LOC58, ((NimStringDesc*) &T839829468_137)); appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI294244))); appendChar(LOC58, 41); internalerror_198113_155036129(LOC58); } break; } { if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61; gendeepcopyproc_540066_839829468(m0, (*t0).deepcopy, result0); } goto LA59; LA61: ; { if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64; gendeepcopyproc_540066_839829468(m0, (*origtype0).deepcopy, result0); } goto LA59; LA64: ; LA59: ; LOC66 = (Ropeobj180006*)0; LOC66 = rope_180277_2381377266(((NimStringDesc*) &T839829468_128)); LOC67 = (Ropeobj180006*)0; LOC67 = HEX26_180418_2381377266(LOC66, result0); LOC68 = (Ropeobj180006*)0; LOC68 = rope_180277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_180418_2381377266(LOC67, LOC68); }BeforeRet: ; return result0; } N_NIMCALL(void, localdebuginfo_540449_839829468)(Tcproc531021* p0, Tsym294834* s0) { Ropeobj180006* a0; TY537235 LOC16; NimStringDesc* LOC17; { { if (!!(((163840 & (*p0).options) == 163840))) goto LA3; goto BeforeRet; } LA3: ; { Ttype294840* LOC7; LOC7 = (Ttype294840*)0; LOC7 = skiptypes_298099_850551059((*s0).typ, IL64(211106240964864)); if (!((*LOC7).kind == ((Ttypekind294244) 27) || (*LOC7).kind == ((Ttypekind294244) 48))) goto LA8; goto BeforeRet; } LA8: ; a0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r); { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*s0).kind == 
((Tsymkind294435) 3)); if (!(LOC12)) goto LA13; LOC12 = ccgintroducedptr_535609_839829468(s0); LA13: ; if (!LOC12) goto LA14; a0 = (*s0).loc.r; } LA14: ; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rope_180401_2381377266(((NI64) ((*p0).maxframelen))); LOC17 = (NimStringDesc*)0; LOC17 = nsuNormalize((*(*s0).name).s); LOC16[1] = makecstring_193638_155036129(LOC17); LOC16[2] = a0; LOC16[3] = gentypeinfo_537941_839829468((*p0).module, (*s0).loc.t); linef_534700_839829468(p0, ((Tcprocsection531011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4); (*p0).maxframelen += ((NI) 1); (*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1); }BeforeRet: ; } N_NIMCALL(void, assignlocalvar_540614_839829468)(Tcproc531021* p0, Tsym294834* s0) { Ropeobj180006* decl0; Ropeobj180006* LOC1; Ropeobj180006* LOC2; LOC1 = (Ropeobj180006*)0; LOC1 = localvardecl_540532_839829468(p0, s0); LOC2 = (Ropeobj180006*)0; LOC2 = HEX26_180447_2381377266(LOC1, ((NimStringDesc*) &T839829468_125)); decl0 = HEX26_180447_2381377266(LOC2, tnl_178644_4151366050); line_534690_839829468(p0, ((Tcprocsection531011) 0), decl0); localdebuginfo_540449_839829468(p0, s0); } N_NIMCALL(void, initlocalvar_540398_839829468)(Tcproc531021* p0, Tsym294834* v0, NIM_BOOL immediateasgn0) { { if (!!((((*v0).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0))) goto LA3; { if (!!(immediateasgn0)) goto LA7; constructloc_540388_839829468(p0, (*v0).loc, NIM_FALSE); } LA7: ; } LA3: ; } N_NIMCALL(void, fillresult_535865_839829468)(Tsym294834* param0) { TY535289 LOC1; Ropeobj180006* LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (Ropeobj180006*)0; LOC2 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_210), LOC1, 0); fillloc_534282_839829468((&(*param0).loc), ((Tlockind294808) 4), (*param0).typ, LOC2, ((Tstorageloc294812) 2)); { NIM_BOOL LOC5; Tctypekind531007 LOC6; LOC5 = (NIM_BOOL)0; LOC6 = (Tctypekind531007)0; LOC6 = mapreturntype_535445_839829468((*param0).typ); LOC5 = 
!((LOC6 == ((Tctypekind531007) 17))); if (!(LOC5)) goto LA7; LOC5 = isinvalidreturntype_535548_839829468((*param0).typ); LA7: ; if (!LOC5) goto LA8; (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag294810) 0))%(sizeof(NU16)*8)); (*param0).loc.s = ((Tstorageloc294812) 0); } LA8: ; } N_NIMCALL(void, assignparam_540994_839829468)(Tcproc531021* p0, Tsym294834* s0) { localdebuginfo_540449_839829468(p0, s0); } N_NIMCALL(void, closuresetup_562158_839829468)(Tcproc531021* p0, Tsym294834* prc0) { Tnode294802* ls0; Tnode294802* LOC5; Tsym294834* env0; TY534811 LOC10; { { if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag294431) 11))&31U)))!=0))) goto LA3; goto BeforeRet; } LA3: ; LOC5 = (Tnode294802*)0; LOC5 = HEX5BHEX5D_295238_850551059((*prc0).ast, ((NI) 3)); ls0 = lastson_297364_850551059(LOC5); { if (!!(((*ls0).kind == ((Tnodekind294020) 3)))) goto LA8; internalerror_198100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211)); } LA8: ; env0 = (*ls0).kindU.S4.sym; assignlocalvar_540614_839829468(p0, env0); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_540188_839829468((*env0).loc); LOC10[1] = gettypedesc_537671_839829468((*p0).module, (*env0).typ); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2); }BeforeRet: ; } N_NIMCALL(Ropeobj180006*, initgcframe_540435_839829468)(Tcproc531021* p0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { TY180507 LOC5; if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = (*p0).gcframetype; result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_217), LOC5, 1); } LA3: ; return result0; } N_NIMCALL(Ropeobj180006*, initframe_562140_839829468)(Tcproc531021* p0, Ropeobj180006* procname0, Ropeobj180006* filename0) { Ropeobj180006* result0; Ropeobj180006* LOC1; result0 = (Ropeobj180006*)0; LOC1 = (Ropeobj180006*)0; LOC1 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218)); { Ropeobj180006* 
LOC6; TY537235 LOC7; if (!(((NI) 0) < (*p0).maxframelen)) goto LA4; LOC6 = (Ropeobj180006*)0; LOC6 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = procname0; LOC7[1] = filename0; LOC7[2] = rope_180401_2381377266(((NI64) ((*p0).maxframelen))); LOC7[3] = rope_180401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen))); result0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), LOC7, 4); } goto LA2; LA4: ; { TY534811 LOC9; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = procname0; LOC9[1] = filename0; result0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), LOC9, 2); } LA2: ; return result0; } N_NIMCALL(void, appcg_534648_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) { Ropeobj180006** LOC1; Ropeobj180006* LOC2; LOC1 = (Ropeobj180006**)0; LOC1 = s_531179_3723162438(p0, s0); LOC2 = (Ropeobj180006*)0; LOC2 = ropecg_534407_839829468((*p0).module, frmt0, args0, args0Len0); add_180482_2381377266(LOC1, LOC2); } N_NIMCALL(Ropeobj180006*, deinitgcframe_540441_839829468)(Tcproc531021* p0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { TY535289 LOC5; if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), LOC5, 0); } LA3: ; return result0; } N_NIMCALL(Ropeobj180006*, deinitframe_562150_839829468)(Tcproc531021* p0) { Ropeobj180006* result0; TY535289 LOC1; result0 = (Ropeobj180006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), LOC1, 0); return result0; } N_NIMCALL(void, genprocaux_562284_839829468)(Tcgen531027* m0, Tsym294834* prc0) { Tcproc531021* p0; Ropeobj180006* header0; Ropeobj180006* returnstmt0; Tnode294802* LOC51; Ropeobj180006* generatedproc0; p0 = 
newproc_531206_3723162438(prc0, m0); header0 = genprocheader_537867_839829468(m0, prc0); returnstmt0 = NIM_NIL; { NIM_BOOL LOC3; Tsym294834* res0; LOC3 = (NIM_BOOL)0; LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0)); if (!(LOC3)) goto LA4; LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL)); LA4: ; if (!LOC3) goto LA5; { NI LOC9; LOC9 = (NI)0; LOC9 = len_295081_850551059((*prc0).ast); if (!(LOC9 <= ((NI) 7))) goto LA10; internalerror_198100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120)); } LA10: ; res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym; { NIM_BOOL LOC14; TY180507 LOC34; LOC14 = (NIM_BOOL)0; LOC14 = isinvalidreturntype_535548_839829468((*(*prc0).typ).sons->data[((NI) 0)]); if (!!(LOC14)) goto LA15; { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)) goto LA19; (*res0).flags |= ((NU32)1)<<((((Tsymflag294184) 12))%(sizeof(NU32)*8)); } LA19: ; { NIM_BOOL LOC23; NIM_BOOL LOC24; NIM_BOOL LOC26; Tnode294802* val0; Tnode294802* LOC29; Ropeobj180006* decl0; Tloc294816 a0; TY534811 LOC32; LOC23 = (NIM_BOOL)0; LOC24 = (NIM_BOOL)0; LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0); if (!(LOC24)) goto LA25; LOC26 = (NIM_BOOL)0; LOC26 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC26) goto LA27; LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA27: ; LOC24 = LOC26; LA25: ; LOC23 = LOC24; if (!(LOC23)) goto LA28; LOC29 = (Tnode294802*)0; LOC29 = getbody_337227_1724185294(prc0); val0 = easyresultasgn_562191_839829468(LOC29); LOC23 = !((val0 == NIM_NIL)); LA28: ; if (!LOC23) goto LA30; decl0 = localvardecl_540532_839829468(p0, res0); memset((void*)(&a0), 0, sizeof(a0)); initlocexprsingleuse_541289_839829468(p0, val0, (&a0)); memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = decl0; LOC32[1] = rdloc_540188_839829468(a0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2); 
} goto LA21; LA30: ; { assignlocalvar_540614_839829468(p0, res0); initlocalvar_540398_839829468(p0, res0, NIM_FALSE); } LA21: ; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = rdloc_540188_839829468((*res0).loc); returnstmt0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1); } goto LA12; LA15: ; { fillresult_535865_839829468(res0); assignparam_540994_839829468(p0, res0); { Ttype294840* LOC38; LOC38 = (Ttype294840*)0; LOC38 = skiptypes_298099_850551059((*res0).typ, IL64(211106232576256)); if (!((*LOC38).kind == ((Ttypekind294244) 16))) goto LA39; (*res0).loc.s = ((Tstorageloc294812) 0); } LA39: ; } LA12: ; } LA5: ; { NI i_562627_839829468; NI HEX3Atmp_562743_839829468; NI LOC42; NI res_562746_839829468; i_562627_839829468 = (NI)0; HEX3Atmp_562743_839829468 = (NI)0; LOC42 = (NI)0; LOC42 = sonslen_297351_850551059((*(*prc0).typ).n); HEX3Atmp_562743_839829468 = (NI)(LOC42 - ((NI) 1)); res_562746_839829468 = ((NI) 1); { while (1) { if (!(res_562746_839829468 <= HEX3Atmp_562743_839829468)) goto LA44; i_562627_839829468 = res_562746_839829468; { Tsym294834* param0; param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_562627_839829468]).kindU.S4.sym; { NIM_BOOL LOC48; LOC48 = (NIM_BOOL)0; LOC48 = iscompiletimeonly_330706_3876443242((*param0).typ); if (!LOC48) goto LA49; goto LA45; } LA49: ; assignparam_540994_839829468(p0, param0); } LA45: ; res_562746_839829468 += ((NI) 1); } LA44: ; } } closuresetup_562158_839829468(p0, prc0); LOC51 = (Tnode294802*)0; LOC51 = getbody_337227_1724185294(prc0); genstmts_541244_839829468(p0, LOC51); generatedproc0 = (Ropeobj180006*)0; { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 14))&31U)))!=0)) goto LA54; { if (!((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 6))&7U)))!=0)) goto LA58; header0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_213), header0); } LA58: ; } LA54: ; { TY537235 LOC68; Ropeobj180006** LOC69; Ropeobj180006** LOC70; 
Ropeobj180006** LOC71; if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0)) goto LA62; { if (!((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 6))&7U)))!=0)) goto LA66; header0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_214), header0); } LA66: ; memset((void*)LOC68, 0, sizeof(LOC68)); LOC68[0] = header0; LOC69 = (Ropeobj180006**)0; LOC69 = s_531179_3723162438(p0, ((Tcprocsection531011) 0)); LOC68[1] = (*LOC69); LOC70 = (Ropeobj180006**)0; LOC70 = s_531179_3723162438(p0, ((Tcprocsection531011) 1)); LOC68[2] = (*LOC70); LOC71 = (Ropeobj180006**)0; LOC71 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); LOC68[3] = (*LOC71); generatedproc0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4); } goto LA60; LA62: ; { TY180507 LOC73; Ropeobj180006* LOC74; Ropeobj180006** LOC93; Ropeobj180006** LOC94; Ropeobj180006* LOC101; TY535289 LOC107; Ropeobj180006* LOC108; memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = header0; generatedproc0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1); LOC74 = (Ropeobj180006*)0; LOC74 = initgcframe_540435_839829468(p0); add_180482_2381377266(&generatedproc0, LOC74); { Ropeobj180006** LOC79; Ropeobj180006* procname0; Ropeobj180006* LOC80; Ropeobj180006* LOC81; if (!(((*prc0).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0)) goto LA77; LOC79 = (Ropeobj180006**)0; LOC79 = s_531179_3723162438(p0, ((Tcprocsection531011) 0)); add_180482_2381377266(&generatedproc0, (*LOC79)); procname0 = makecstring_193638_155036129((*(*prc0).name).s); LOC80 = (Ropeobj180006*)0; LOC80 = quotedfilename_198818_155036129((*prc0).info); LOC81 = (Ropeobj180006*)0; LOC81 = initframe_562140_839829468(p0, procname0, LOC80); add_180482_2381377266(&generatedproc0, LOC81); } goto LA75; LA77: ; { Ropeobj180006** LOC83; LOC83 = (Ropeobj180006**)0; LOC83 = s_531179_3723162438(p0, ((Tcprocsection531011) 0)); 
add_180482_2381377266(&generatedproc0, (*LOC83)); } LA75: ; { TY535289 LOC88; if (!(((*prc0).options &(1U<<((NU)(((Toption171009) 19))&31U)))!=0)) goto LA86; memset((void*)LOC88, 0, sizeof(LOC88)); appcg_534648_839829468(p0, ((Tcprocsection531011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0); } LA86: ; { if (!(*p0).beforeretneeded) goto LA91; add_180487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223)); } LA91: ; LOC93 = (Ropeobj180006**)0; LOC93 = s_531179_3723162438(p0, ((Tcprocsection531011) 1)); add_180482_2381377266(&generatedproc0, (*LOC93)); LOC94 = (Ropeobj180006**)0; LOC94 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180482_2381377266(&generatedproc0, (*LOC94)); { TY535289 LOC99; Ropeobj180006* LOC100; if (!(*p0).beforeretneeded) goto LA97; memset((void*)LOC99, 0, sizeof(LOC99)); LOC100 = (Ropeobj180006*)0; LOC100 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0); add_180482_2381377266(&generatedproc0, LOC100); } LA97: ; LOC101 = (Ropeobj180006*)0; LOC101 = deinitgcframe_540441_839829468(p0); add_180482_2381377266(&generatedproc0, LOC101); { Ropeobj180006* LOC106; if (!(((*prc0).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0)) goto LA104; LOC106 = (Ropeobj180006*)0; LOC106 = deinitframe_562150_839829468(p0); add_180482_2381377266(&generatedproc0, LOC106); } LA104: ; add_180482_2381377266(&generatedproc0, returnstmt0); memset((void*)LOC107, 0, sizeof(LOC107)); LOC108 = (Ropeobj180006*)0; LOC108 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0); add_180482_2381377266(&generatedproc0, LOC108); } LA60: ; add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 10))- 0], generatedproc0); } N_NIMCALL(Tcgen531027*, findpendingmodule_534241_839829468)(Tcgen531027* m0, Tsym294834* s0) { Tcgen531027* result0; Tsym294834* ms0; result0 = (Tcgen531027*)0; ms0 = getmodule_301123_2984716966(s0); result0 = gmodules_531170_3723162438->data[(*ms0).position]; return result0; } N_NIMCALL(NIM_BOOL, 
isgetprocaddr_561442_839829468)(Tlib294820* lib0) { NIM_BOOL result0; Tnode294802* n0; NIM_BOOL LOC1; NIM_BOOL LOC2; result0 = (NIM_BOOL)0; n0 = (*lib0).path; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = ((*n0).kind == ((Tnodekind294020) 27) || (*n0).kind == ((Tnodekind294020) 29) || (*n0).kind == ((Tnodekind294020) 30) || (*n0).kind == ((Tnodekind294020) 31) || (*n0).kind == ((Tnodekind294020) 26) || (*n0).kind == ((Tnodekind294020) 28) || (*n0).kind == ((Tnodekind294020) 32)); if (!(LOC2)) goto LA3; LOC2 = !(((*n0).typ == NIM_NIL)); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA4; LOC1 = ((*(*n0).typ).kind == ((Ttypekind294244) 26) || (*(*n0).typ).kind == ((Ttypekind294244) 25)); LA4: ; result0 = LOC1; return result0; } N_NIMCALL(void, initlocexpr_541283_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* result0) { initloc_534273_839829468(result0, ((Tlockind294808) 0), (*e0).typ, ((Tstorageloc294812) 0)); expr_541248_839829468(p0, e0, result0); } N_NIMCALL(void, loaddynamiclib_561480_839829468)(Tcgen531027* m0, Tlib294820* lib0) { { Ropeobj180006* tmp0; TY180507 LOC5; if (!!((*lib0).generated)) goto LA3; (*lib0).generated = NIM_TRUE; tmp0 = gettempname_535596_839829468(m0); asgnRefNoCycle((void**) (&(*lib0).name), tmp0); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = tmp0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1); { TY136002* s0; Ropeobj180006* loadlib0; TY534811 LOC18; if (!((*(*lib0).path).kind >= ((Tnodekind294020) 20) && (*(*lib0).path).kind <= ((Tnodekind294020) 22))) goto LA8; s0 = (TY136002*) newSeq((&NTI136002), 0); libcandidates_172605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0)); rawmessage_196612_155036129(((Tmsgkind193002) 286), (*(*lib0).path).kindU.S3.strval); loadlib0 = NIM_NIL; { NI i_561847_839829468; NI HEX3Atmp_561902_839829468; NI res_561905_839829468; i_561847_839829468 = (NI)0; HEX3Atmp_561902_839829468 = (NI)0; HEX3Atmp_561902_839829468 = (s0 ? 
(s0->Sup.len-1) : -1); res_561905_839829468 = ((NI) 0); { while (1) { TY534811 LOC17; if (!(res_561905_839829468 <= HEX3Atmp_561902_839829468)) goto LA12; i_561847_839829468 = res_561905_839829468; (*m0).labels += ((NI) 1); { if (!(((NI) 0) < i_561847_839829468)) goto LA15; add_180487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229)); } LA15: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = tmp0; LOC17[1] = getstrlit_551468_839829468(m0, s0->data[i_561847_839829468]); appcg_534632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2); res_561905_839829468 += ((NI) 1); } LA12: ; } } memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = loadlib0; LOC18[1] = getstrlit_551468_839829468(m0, (*(*lib0).path).kindU.S3.strval); appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2); } goto LA6; LA8: ; { Tcproc531021* p0; Tloc294816 dest0; Ropeobj180006** LOC20; Ropeobj180006** LOC21; Ropeobj180006** LOC22; TY534811 LOC23; p0 = newproc_531206_3723162438(NIM_NIL, m0); (*p0).options = ((*p0).options & ~ 163840); memset((void*)(&dest0), 0, sizeof(dest0)); initlocexpr_541283_839829468(p0, (*lib0).path, (&dest0)); LOC20 = (Ropeobj180006**)0; LOC20 = s_531179_3723162438(p0, ((Tcprocsection531011) 0)); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], (*LOC20)); LOC21 = (Ropeobj180006**)0; LOC21 = s_531179_3723162438(p0, ((Tcprocsection531011) 1)); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 16))- 0], (*LOC21)); LOC22 = (Ropeobj180006**)0; LOC22 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 16))- 0], (*LOC22)); memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = tmp0; LOC23[1] = rdloc_540188_839829468(dest0); appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2); } LA6: ; } LA3: ; { if (!((*lib0).name == NIM_NIL)) goto LA26; 
internalerror_198113_155036129(((NimStringDesc*) &T839829468_233)); } LA26: ; } N_NIMCALL(Ropeobj180006*, mangledynlibproc_540816_839829468)(Tsym294834* sym0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 16))&31U)))!=0)) goto LA3; result0 = rope_180277_2381377266((*(*sym0).name).s); } goto LA1; LA3: ; { TY180507 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_180401_2381377266(((NI64) ((*sym0).Sup.id))); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_234), LOC6, 1); } LA1: ; return result0; } N_NIMCALL(void, symindynamiclib_561929_839829468)(Tcgen531027* m0, Tsym294834* sym0) { Tlib294820* lib0; NIM_BOOL iscall0; Ropeobj180006* extname0; Ropeobj180006* tmp0; TY534811 LOC43; lib0 = (*sym0).annex; iscall0 = isgetprocaddr_561442_839829468(lib0); extname0 = (*sym0).loc.r; { if (!!(iscall0)) goto LA3; loaddynamiclib_561480_839829468(m0, lib0); } LA3: ; tmp0 = mangledynlibproc_540816_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0); asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL); (*m0).labels += ((NI) 2); { Tnode294802* n0; Tloc294816 a0; Tnode294802* LOC9; Ropeobj180006* params0; Ropeobj180006* LOC10; Ropeobj180006* load0; TY537235 LOC17; NimStringDesc* LOC18; Tnode294802* last0; NimStringDesc* idx0; if (!iscall0) goto LA7; n0 = (*lib0).path; memset((void*)(&a0), 0, sizeof(a0)); LOC9 = (Tnode294802*)0; LOC9 = HEX5BHEX5D_295238_850551059(n0, ((NI) 0)); initlocexpr_541283_839829468((*m0).initproc, LOC9, (&a0)); LOC10 = (Ropeobj180006*)0; LOC10 = rdloc_540188_839829468(a0); params0 = HEX26_180447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118)); { NI i_561964_839829468; NI HEX3Atmp_562025_839829468; NI LOC12; NI res_562028_839829468; i_561964_839829468 = (NI)0; HEX3Atmp_562025_839829468 = (NI)0; LOC12 = (NI)0; LOC12 = len_295081_850551059(n0); HEX3Atmp_562025_839829468 = (NI)(LOC12 - ((NI) 2)); res_562028_839829468 = ((NI) 1); { while (1) { Tnode294802* 
LOC15; Ropeobj180006* LOC16; if (!(res_562028_839829468 <= HEX3Atmp_562025_839829468)) goto LA14; i_561964_839829468 = res_562028_839829468; LOC15 = (Tnode294802*)0; LOC15 = HEX5BHEX5D_295238_850551059(n0, i_561964_839829468); initlocexpr_541283_839829468((*m0).initproc, LOC15, (&a0)); LOC16 = (Ropeobj180006*)0; LOC16 = rdloc_540188_839829468(a0); add_180482_2381377266(&params0, LOC16); add_180487_2381377266(&params0, ((NimStringDesc*) &T839829468_110)); res_562028_839829468 += ((NI) 1); } LA14: ; } } memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = tmp0; LOC17[1] = gettypedesc_537671_839829468(m0, (*sym0).typ); LOC17[2] = params0; LOC18 = (NimStringDesc*)0; LOC18 = HEX24_180856_2381377266(extname0); LOC17[3] = makecstring_193638_155036129(LOC18); load0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4); last0 = lastson_297364_850551059(n0); { if (!((*last0).kind == ((Tnodekind294020) 58))) goto LA21; last0 = (*last0).kindU.S6.sons->data[((NI) 1)]; } LA21: ; { NimStringDesc* LOC27; if (!!(((*last0).kind == ((Tnodekind294020) 20)))) goto LA25; LOC27 = (NimStringDesc*)0; LOC27 = HEX24_198185_1689653243(T839829468_236); internalerror_198113_155036129(LOC27); } LA25: ; idx0 = (*last0).kindU.S3.strval; { Ropeobj180006** LOC32; if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30; LOC32 = (Ropeobj180006**)0; LOC32 = s_531179_3723162438((*m0).initproc, ((Tcprocsection531011) 2)); add_180482_2381377266(LOC32, load0); } goto LA28; LA30: ; { NIM_BOOL LOC34; LOC34 = (NIM_BOOL)0; LOC34 = ((idx0 ? 
idx0->Sup.len : 0) == ((NI) 1)); if (!(LOC34)) goto LA35; LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57))); LA35: ; if (!LOC34) goto LA36; add_180482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0); } goto LA28; LA36: ; { NimStringDesc* LOC39; LOC39 = (NimStringDesc*)0; LOC39 = rawNewString(idx0->Sup.len + 13); appendString(LOC39, ((NimStringDesc*) &T839829468_237)); appendString(LOC39, idx0); internalerror_198100_155036129((*sym0).info, LOC39); } LA28: ; } goto LA5; LA7: ; { TY537235 LOC41; NimStringDesc* LOC42; memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = tmp0; LOC41[1] = gettypedesc_537671_839829468(m0, (*sym0).typ); LOC41[2] = (*lib0).name; LOC42 = (NimStringDesc*)0; LOC42 = HEX24_180856_2381377266(extname0); LOC41[3] = makecstring_193638_155036129(LOC42); appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4); } LA5: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = (*sym0).loc.r; LOC43[1] = gettypedesc_537671_839829468(m0, (*sym0).loc.t); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2); } N_NIMCALL(void, symindynamiclibpartial_562071_839829468)(Tcgen531027* m0, Tsym294834* sym0) { asgnRefNoCycle((void**) (&(*sym0).loc.r), mangledynlibproc_540816_839829468(sym0)); asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL); } N_NIMCALL(void, genprocnoforward_562906_839829468)(Tcgen531027* m0, Tsym294834* prc0) { { fillprocloc_541201_839829468(prc0); useheader_534369_839829468(m0, prc0); { Ropeobj180006* LOC5; if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 7))&15U)))!=0)) goto LA3; LOC5 = (Ropeobj180006*)0; LOC5 = cgsym_534403_839829468(m0, (*(*prc0).name).s); goto BeforeRet; } LA3: ; genprocprototype_541254_839829468(m0, prc0); { if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)) goto LA8; } goto LA6; LA8: ; { if 
(!((*(*prc0).typ).callconv == ((Tcallingconvention294002) 5))) goto LA11; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = containsorincl_270862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id); if (!!(LOC15)) goto LA16; genprocaux_562284_839829468(m0, prc0); } LA16: ; } goto LA6; LA11: ; { Tcgen531027* q0; if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0)) goto LA19; q0 = findpendingmodule_534241_839829468(m0, prc0); { NIM_BOOL LOC23; NIM_BOOL LOC25; LOC23 = (NIM_BOOL)0; LOC23 = !((q0 == NIM_NIL)); if (!(LOC23)) goto LA24; LOC25 = (NIM_BOOL)0; LOC25 = containsorincl_270862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id); LOC23 = !(LOC25); LA24: ; if (!LOC23) goto LA26; symindynamiclib_561929_839829468(q0, prc0); } goto LA21; LA26: ; { symindynamiclibpartial_562071_839829468(m0, prc0); } LA21: ; } goto LA6; LA19: ; { Tcgen531027* q0; if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0))) goto LA30; q0 = findpendingmodule_534241_839829468(m0, prc0); { NIM_BOOL LOC34; NIM_BOOL LOC36; LOC34 = (NIM_BOOL)0; LOC34 = !((q0 == NIM_NIL)); if (!(LOC34)) goto LA35; LOC36 = (NIM_BOOL)0; LOC36 = containsorincl_270862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id); LOC34 = !(LOC36); LA35: ; if (!LOC34) goto LA37; genprocaux_562284_839829468(q0, prc0); } LA37: ; } goto LA6; LA30: ; LA6: ; }BeforeRet: ; } N_NIMCALL(void, genproc_534951_839829468)(Tcgen531027* m0, Tsym294834* prc0) { { { NIM_BOOL LOC3; NIM_BOOL LOC5; LOC3 = (NIM_BOOL)0; LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 26))&31U)))!=0); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = isactivated_563431_839829468(prc0); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; goto BeforeRet; } LA6: ; fillprocloc_541201_839829468(prc0); { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 4))&31U)))!=0)) goto LA10; addforwardedproc_534203_839829468(m0, prc0); } goto LA8; LA10: ; { genprocnoforward_562906_839829468(m0, prc0); { NIM_BOOL LOC15; NIM_BOOL LOC16; LOC15 = (NIM_BOOL)0; LOC16 
= (NIM_BOOL)0; LOC16 = ((65600 & (*prc0).flags) == 64); if (!(LOC16)) goto LA17; LOC16 = !((generatedheader_534201_839829468 == NIM_NIL)); LA17: ; LOC15 = LOC16; if (!(LOC15)) goto LA18; LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)); LA18: ; if (!LOC15) goto LA19; genprocprototype_541254_839829468(generatedheader_534201_839829468, prc0); { if (!((*(*prc0).typ).callconv == ((Tcallingconvention294002) 5))) goto LA23; { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = containsorincl_270862_2627731572((&(*generatedheader_534201_839829468).declaredthings), (*prc0).Sup.id); if (!!(LOC27)) goto LA28; genprocaux_562284_839829468(generatedheader_534201_839829468, prc0); } LA28: ; } LA23: ; } LA19: ; } LA8: ; }BeforeRet: ; } static N_INLINE(NIM_BOOL, emulatedthreadvars_534949_839829468)(void) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((71303168 & ~ gglobaloptions_171130_2607990831)==0); return result0; } N_NIMCALL(void, declarethreadvar_540676_839829468)(Tcgen531027* m0, Tsym294834* s0, NIM_BOOL isextern0) { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = emulatedthreadvars_534949_839829468(); if (!LOC3) goto LA4; { NIM_BOOL LOC8; TY534811 LOC11; LOC8 = (NIM_BOOL)0; LOC8 = containsorincl_270862_2627731572((&nimtvdeclared_540675_839829468), (*s0).Sup.id); if (!!(LOC8)) goto LA9; nimtvdeps_540674_839829468 = (Ttypeseq294836*) incrSeqV2(&(nimtvdeps_540674_839829468)->Sup, sizeof(Ttype294840*)); asgnRefNoCycle((void**) (&nimtvdeps_540674_839829468->data[nimtvdeps_540674_839829468->Sup.len]), (*s0).loc.t); ++nimtvdeps_540674_839829468->Sup.len; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_537671_839829468(m0, (*s0).loc.t); LOC11[1] = (*s0).loc.r; addf_181205_2381377266(&nimtv_540656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2); } LA9: ; } goto LA1; LA4: ; { Ropeobj180006* LOC21; TY180507 LOC22; { if (!isextern0) goto LA15; add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) 
&T839829468_240)); } LA15: ; { if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 22))&63U)))!=0)) goto LA19; add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_241)); } LA19: ; LOC21 = (Ropeobj180006*)0; LOC21 = gettypedesc_537671_839829468(m0, (*s0).loc.t); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], LOC21); memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = (*s0).loc.r; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1); } LA1: ; } N_NIMCALL(void, genvarprototypeaux_546254_839829468)(Tcgen531027* m0, Tsym294834* sym0) { Ropeobj180006* LOC1; { useheader_534369_839829468(m0, sym0); LOC1 = (Ropeobj180006*)0; LOC1 = manglename_535205_839829468(sym0); fillloc_534282_839829468((&(*sym0).loc), ((Tlockind294808) 3), (*sym0).typ, LOC1, ((Tstorageloc294812) 3)); { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0); if (LOC4) goto LA5; LOC4 = containsorincl_270862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LA5: ; if (!LOC4) goto LA6; goto BeforeRet; } LA6: ; { if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 22))&31U)))!=0)) goto LA14; declarethreadvar_540676_839829468(m0, sym0, NIM_TRUE); } goto LA12; LA14: ; { Ropeobj180006* LOC17; TY180507 LOC30; add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_240)); LOC17 = (Ropeobj180006*)0; LOC17 = gettypedesc_537671_839829468(m0, (*sym0).loc.t); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], LOC17); { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0)) goto LA20; add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_53)); } LA20: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 8))&31U)))!=0)) goto LA24; 
add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_121)); } LA24: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 7))&31U)))!=0)) goto LA28; add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_122)); } LA28: ; memset((void*)LOC30, 0, sizeof(LOC30)); LOC30[0] = (*sym0).loc.r; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1); } LA12: ; } LA10: ; }BeforeRet: ; } N_NIMCALL(void, genvarprototype_541236_839829468)(Tcgen531027* m0, Tsym294834* sym0) { genvarprototypeaux_546254_839829468(m0, sym0); } N_NIMCALL(Ropeobj180006*, cgsym_534403_839829468)(Tcgen531027* m0, NimStringDesc* name0) { Ropeobj180006* result0; Tsym294834* sym0; result0 = (Ropeobj180006*)0; sym0 = getcompilerproc_340746_3937434831(name0); { if (!!((sym0 == NIM_NIL))) goto LA3; switch ((*sym0).kind) { case ((Tsymkind294435) 12): case ((Tsymkind294435) 13): case ((Tsymkind294435) 15): case ((Tsymkind294435) 14): { genproc_534951_839829468(m0, sym0); } break; case ((Tsymkind294435) 8): case ((Tsymkind294435) 11): case ((Tsymkind294435) 9): { genvarprototype_541236_839829468(m0, sym0); } break; case ((Tsymkind294435) 7): { Ropeobj180006* LOC8; LOC8 = (Ropeobj180006*)0; LOC8 = gettypedesc_537671_839829468(m0, (*sym0).typ); } break; default: { NimStringDesc* LOC10; LOC10 = (NimStringDesc*)0; LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI294435))->Sup.len + 9); appendString(LOC10, ((NimStringDesc*) &T839829468_243)); appendString(LOC10, name0); appendString(LOC10, ((NimStringDesc*) &T839829468_244)); appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI294435))); internalerror_198113_155036129(LOC10); } break; } } goto LA1; LA3: ; { rawmessage_196612_155036129(((Tmsgkind193002) 68), name0); } LA1: ; result0 = (*sym0).loc.r; return result0; } N_NIMCALL(Ropeobj180006*, ropecg_534407_839829468)(Tcgen531027* m0, NimStringDesc* frmt0, 
Ropeobj180006** args0, NI args0Len0) { Ropeobj180006* result0; NI i0; NI length0; NI num0; result0 = (Ropeobj180006*)0; i0 = ((NI) 0); length0 = (frmt0 ? frmt0->Sup.len : 0); result0 = NIM_NIL; num0 = ((NI) 0); { while (1) { NI start0; if (!(i0 < length0)) goto LA2; { if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5; i0 += ((NI) 1); switch (((NU8)(frmt0->data[i0]))) { case 36: { add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_19)); i0 += ((NI) 1); } break; case 35: { i0 += ((NI) 1); add_180482_2381377266(&result0, args0[num0]); num0 += ((NI) 1); } break; case 48 ... 57: { NI j0; j0 = ((NI) 0); { while (1) { j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48)); i0 += ((NI) 1); { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = (length0 <= i0); if (LOC14) goto LA15; LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))); LA15: ; if (!LOC14) goto LA16; goto LA10; } LA16: ; } } LA10: ; num0 = j0; { NimStringDesc* LOC22; NimStringDesc* LOC23; if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20; LOC22 = (NimStringDesc*)0; LOC23 = (NimStringDesc*)0; LOC23 = nimIntToStr(j0); LOC22 = rawNewString(LOC23->Sup.len + 30); appendString(LOC22, ((NimStringDesc*) &T839829468_20)); appendString(LOC22, LOC23); internalerror_198113_155036129(LOC22); } LA20: ; add_180482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]); } break; case 110: { { if (!!(((goptions_171128_2607990831 &(1U<<((NU)(((Toption171009) 10))&31U)))!=0))) goto LA27; add_180482_2381377266(&result0, rnl_180903_2381377266); } LA27: ; i0 += ((NI) 1); } break; case 78: { add_180482_2381377266(&result0, rnl_180903_2381377266); i0 += ((NI) 1); } break; default: { NimStringDesc* LOC31; LOC31 = (NimStringDesc*)0; LOC31 = rawNewString(31); appendString(LOC31, ((NimStringDesc*) &T839829468_20)); appendChar(LOC31, frmt0->data[i0]); internalerror_198113_155036129(LOC31); } break; } } goto LA3; LA5: ; { NIM_BOOL LOC33; NI j0; NimStringDesc* 
ident0; Ropeobj180006* LOC39; LOC33 = (NIM_BOOL)0; LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35)); if (!(LOC33)) goto LA34; LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95))); LA34: ; if (!LOC33) goto LA35; i0 += ((NI) 1); j0 = i0; { while (1) { if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38; j0 += ((NI) 1); } LA38: ; } ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1))); i0 = j0; LOC39 = (Ropeobj180006*)0; LOC39 = cgsym_534403_839829468(m0, ident0); add_180482_2381377266(&result0, LOC39); } goto LA3; LA35: ; { NIM_BOOL LOC41; NI j0; NimStringDesc* LOC47; Ropeobj180006* LOC48; LOC41 = (NIM_BOOL)0; LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35)); if (!(LOC41)) goto LA42; LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36)); LA42: ; if (!LOC41) goto LA43; i0 += ((NI) 2); j0 = ((NI) 0); { while (1) { if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46; j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48)); i0 += ((NI) 1); } LA46: ; } LOC47 = (NimStringDesc*)0; LOC47 = HEX24_180856_2381377266(args0[(NI)(j0 - ((NI) 1))]); LOC48 = (Ropeobj180006*)0; LOC48 = cgsym_534403_839829468(m0, LOC47); add_180482_2381377266(&result0, LOC48); } goto LA3; LA43: ; LA3: ; start0 = i0; { while (1) { if (!(i0 < length0)) goto LA50; { NIM_BOOL LOC53; LOC53 = (NIM_BOOL)0; LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36))); if (!(LOC53)) goto LA54; LOC53 = !(((NU8)(frmt0->data[i0]) == 
(NU8)(35))); LA54: ; if (!LOC53) goto LA55; i0 += ((NI) 1); } goto LA51; LA55: ; { goto LA49; } LA51: ; } LA50: ; } LA49: ; { NimStringDesc* LOC62; if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60; LOC62 = (NimStringDesc*)0; LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1))); add_180487_2381377266(&result0, LOC62); } LA60: ; } LA2: ; } return result0; } static N_INLINE(NIM_BOOL, crossescppboundary_562754_839829468)(Tcgen531027* m0, Tsym294834* sym0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; Tsym294834* LOC4; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); if (!(LOC2)) goto LA3; LOC4 = (Tsym294834*)0; LOC4 = getmodule_301123_2984716966(sym0); LOC2 = !((((*LOC4).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0)); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA5; LOC1 = !((gcmd_171132_2607990831 == ((Tcommands171076) 2))); LA5: ; result0 = LOC1; return result0; } N_NIMCALL(void, genprocprototype_541254_839829468)(Tcgen531027* m0, Tsym294834* sym0) { { useheader_534369_839829468(m0, sym0); { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)) goto LA3; goto BeforeRet; } LA3: ; { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0)) goto LA7; { NIM_BOOL LOC11; Tsym294834* LOC12; NIM_BOOL LOC14; TY534811 LOC17; Ropeobj180006* LOC18; LOC11 = (NIM_BOOL)0; LOC12 = (Tsym294834*)0; LOC12 = getmodule_301123_2984716966(sym0); LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id)); if (!(LOC11)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_270862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LOC11 = !(LOC14); LA13: ; if (!LOC11) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_537671_839829468(m0, (*sym0).loc.t); LOC17[1] = mangledynlibproc_540816_839829468(sym0); LOC18 = (Ropeobj180006*)0; LOC18 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2); 
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], LOC18); } LA15: ; } goto LA5; LA7: ; { NIM_BOOL LOC20; Ropeobj180006* header0; TY180507 LOC47; Ropeobj180006* LOC48; LOC20 = (NIM_BOOL)0; LOC20 = containsorincl_270862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id); if (!!(LOC20)) goto LA21; header0 = genprocheader_537867_839829468(m0, sym0); { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 14))&31U)))!=0); if (!(LOC25)) goto LA26; LOC25 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 6))&7U)))!=0); LA26: ; if (!LOC25) goto LA27; header0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_213), header0); } LA27: ; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention294002) 5))); if (!(LOC31)) goto LA32; LOC31 = crossescppboundary_562754_839829468(m0, sym0); LA32: ; if (!LOC31) goto LA33; header0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_246), header0); } LA33: ; { NIM_BOOL LOC37; LOC37 = (NIM_BOOL)0; LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0); if (!(LOC37)) goto LA38; LOC37 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 7))&7U)))!=0); LA38: ; if (!LOC37) goto LA39; add_180487_2381377266(&header0, ((NimStringDesc*) &T839829468_247)); } LA39: ; { NIM_BOOL LOC43; LOC43 = (NIM_BOOL)0; LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 14))&31U)))!=0); if (!(LOC43)) goto LA44; LOC43 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 7))&7U)))!=0); LA44: ; if (!LOC43) goto LA45; add_180487_2381377266(&header0, ((NimStringDesc*) &T839829468_248)); } LA45: ; memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = header0; LOC48 = (Ropeobj180006*)0; LOC48 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1); 
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 7))- 0], LOC48); } goto LA5; LA21: ; LA5: ; }BeforeRet: ; } static N_INLINE(NIM_BOOL, usesnativegc_171177_2607990831)(void) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = (((Tgcmode171080) 5) <= gselectedgc_171133_2607990831); return result0; } N_NIMCALL(void, genrefassign_540311_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; TY534811 LOC8; LOC3 = (NIM_BOOL)0; LOC3 = (dest0.s == ((Tstorageloc294812) 2)); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = usesnativegc_171177_2607990831(); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_540188_839829468(dest0); LOC8[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC8, 2); } goto LA1; LA6: ; { if (!(dest0.s == ((Tstorageloc294812) 3))) goto LA10; { NIM_BOOL LOC14; TY534811 LOC17; LOC14 = (NIM_BOOL)0; LOC14 = canformacycle_322123_3876443242(dest0.t); if (!LOC14) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_540204_839829468(dest0); LOC17[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_249), LOC17, 2); } goto LA12; LA15: ; { TY534811 LOC19; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_540204_839829468(dest0); LOC19[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_250), LOC19, 2); } LA12: ; } goto LA1; LA10: ; { TY534811 LOC21; memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = addrloc_540204_839829468(dest0); LOC21[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_251), LOC21, 2); } LA1: ; } N_NIMCALL(void, optasgnloc_551788_839829468)(Tloc294816 a0, Ttype294840* t0, Ropeobj180006* field0, Tloc294816* 
Result) { Ropeobj180006* LOC1; Ropeobj180006* LOC2; (*Result).k = ((Tlockind294808) 5); (*Result).s = a0.s; unsureAsgnRef((void**) (&(*Result).t), t0); LOC1 = (Ropeobj180006*)0; LOC1 = rdloc_540188_839829468(a0); LOC2 = (Ropeobj180006*)0; LOC2 = HEX26_180447_2381377266(LOC1, ((NimStringDesc*) &T839829468_257)); unsureAsgnRef((void**) (&(*Result).r), HEX26_180418_2381377266(LOC2, field0)); } N_NIMCALL(void, genoptasgntuple_552001_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0) { Tassignmentflag540302Set newflags0; Ttype294840* t_552053_839829468; Ttype294840* LOC9; { if (!(src0.s == ((Tstorageloc294812) 1))) goto LA3; newflags0 = (flags0 | 1); } goto LA1; LA3: ; { if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag294431) 6))&31U)))!=0)) goto LA6; newflags0 = (flags0 & ~ 1); } goto LA1; LA6: ; { newflags0 = flags0; } LA1: ; LOC9 = (Ttype294840*)0; LOC9 = skiptypes_298099_850551059(dest0.t, IL64(211106232576256)); t_552053_839829468 = getuniquetype_530640_2036603609(LOC9); { NI i_552071_839829468; NI HEX3Atmp_552077_839829468; NI LOC11; NI res_552080_839829468; i_552071_839829468 = (NI)0; HEX3Atmp_552077_839829468 = (NI)0; LOC11 = (NI)0; LOC11 = len_297339_850551059(t_552053_839829468); HEX3Atmp_552077_839829468 = (LOC11 - 1); res_552080_839829468 = ((NI) 0); { while (1) { Ttype294840* t0; Ropeobj180006* field0; TY180507 LOC14; Tloc294816 LOC15; Tloc294816 LOC16; if (!(res_552080_839829468 <= HEX3Atmp_552077_839829468)) goto LA13; i_552071_839829468 = res_552080_839829468; t0 = (*t_552053_839829468).sons->data[i_552071_839829468]; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_180401_2381377266(((NI64) (i_552071_839829468))); field0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1); memset((void*)(&LOC15), 0, sizeof(LOC15)); optasgnloc_551788_839829468(dest0, t0, field0, (&LOC15)); memset((void*)(&LOC16), 0, sizeof(LOC16)); optasgnloc_551788_839829468(src0, t0, field0, (&LOC16)); 
genassignment_541264_839829468(p0, LOC15, LOC16, newflags0); res_552080_839829468 += ((NI) 1); } LA13: ; } } } N_NIMCALL(void, gengenericasgn_552167_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0) { { NIM_BOOL LOC3; Ttype294840* LOC5; LOC3 = (NIM_BOOL)0; LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag540302) 0))&7U)))!=0)); if (LOC3) goto LA4; LOC5 = (Ttype294840*)0; LOC5 = skiptypes_298099_850551059(dest0.t, IL64(211106242013440)); LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag294431) 6))&31U)))!=0); LA4: ; if (!LOC3) goto LA6; { NIM_BOOL LOC10; NIM_BOOL LOC12; TY537238 LOC15; LOC10 = (NIM_BOOL)0; LOC10 = (dest0.s == ((Tstorageloc294812) 2)); if (LOC10) goto LA11; LOC12 = (NIM_BOOL)0; LOC12 = usesnativegc_171177_2607990831(); LOC10 = !(LOC12); LA11: ; if (!LOC10) goto LA13; usestringh_534345_839829468((*p0).module); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = addrloc_540204_839829468(dest0); LOC15[1] = addrloc_540204_839829468(src0); LOC15[2] = rdloc_540188_839829468(dest0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3); } goto LA8; LA13: ; { TY537238 LOC17; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_540204_839829468(dest0); LOC17[1] = addrloc_540204_839829468(src0); LOC17[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3); } LA8: ; } goto LA1; LA6: ; { TY537238 LOC19; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_540204_839829468(dest0); LOC19[1] = addrloc_540204_839829468(src0); LOC19[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3); } LA1: ; } N_NIMCALL(NI, asgncomplexity_551750_839829468)(Tnode294802* n0) { NI result0; result0 = (NI)0; { if (!!((n0 == NIM_NIL))) goto LA3; switch ((*n0).kind) { case 
((Tnodekind294020) 3): { result0 = ((NI) 1); } break; case ((Tnodekind294020) 139): { result0 = ((NI) 100); } break; case ((Tnodekind294020) 138): { { Tnode294802* t_551767_839829468; t_551767_839829468 = (Tnode294802*)0; { NI i_551781_839829468; NI HEX3Atmp_551783_839829468; NI LOC10; NI res_551785_839829468; i_551781_839829468 = (NI)0; HEX3Atmp_551783_839829468 = (NI)0; LOC10 = (NI)0; LOC10 = len_295081_850551059(n0); HEX3Atmp_551783_839829468 = (LOC10 - 1); res_551785_839829468 = ((NI) 0); { while (1) { NI LOC13; if (!(res_551785_839829468 <= HEX3Atmp_551783_839829468)) goto LA12; i_551781_839829468 = res_551785_839829468; t_551767_839829468 = (*n0).kindU.S6.sons->data[i_551781_839829468]; LOC13 = (NI)0; LOC13 = asgncomplexity_551750_839829468(t_551767_839829468); result0 += LOC13; res_551785_839829468 += ((NI) 1); } LA12: ; } } } } break; default: { } break; } } LA3: ; return result0; } N_NIMCALL(void, genoptasgnobject_552084_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0, Tnode294802* t0) { Tassignmentflag540302Set newflags0; { { if (!(t0 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; { if (!(src0.s == ((Tstorageloc294812) 1))) goto LA7; newflags0 = (flags0 | 1); } goto LA5; LA7: ; { if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag294431) 6))&31U)))!=0)) goto LA10; newflags0 = (flags0 & ~ 1); } goto LA5; LA10: ; { newflags0 = flags0; } LA5: ; switch ((*t0).kind) { case ((Tnodekind294020) 3): { Tsym294834* field0; Tloc294816 LOC14; Tloc294816 LOC15; field0 = (*t0).kindU.S4.sym; memset((void*)(&LOC14), 0, sizeof(LOC14)); optasgnloc_551788_839829468(dest0, (*field0).typ, (*field0).loc.r, (&LOC14)); memset((void*)(&LOC15), 0, sizeof(LOC15)); optasgnloc_551788_839829468(src0, (*field0).typ, (*field0).loc.r, (&LOC15)); genassignment_541264_839829468(p0, LOC14, LOC15, newflags0); } break; case ((Tnodekind294020) 138): { { Tnode294802* child_552155_839829468; child_552155_839829468 = (Tnode294802*)0; { NI 
i_552160_839829468; NI HEX3Atmp_552162_839829468; NI LOC19; NI res_552164_839829468; i_552160_839829468 = (NI)0; HEX3Atmp_552162_839829468 = (NI)0; LOC19 = (NI)0; LOC19 = len_295081_850551059(t0); HEX3Atmp_552162_839829468 = (LOC19 - 1); res_552164_839829468 = ((NI) 0); { while (1) { if (!(res_552164_839829468 <= HEX3Atmp_552162_839829468)) goto LA21; i_552160_839829468 = res_552164_839829468; child_552155_839829468 = (*t0).kindU.S6.sons->data[i_552160_839829468]; genoptasgnobject_552084_839829468(p0, dest0, src0, newflags0, child_552155_839829468); res_552164_839829468 += ((NI) 1); } LA21: ; } } } } break; default: { } break; } }BeforeRet: ; } N_NIMCALL(void, genassignment_541264_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0) { Ttype294840* ty0; { { NIM_BOOL LOC3; TY534811 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = !((src0.t == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = ((*src0.t).kind == ((Ttypekind294244) 21)); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_540188_839829468(dest0); LOC7[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2); goto BeforeRet; } LA5: ; ty0 = skiptypes_298099_850551059(dest0.t, IL64(211106233624832)); switch ((*ty0).kind) { case ((Ttypekind294244) 22): { genrefassign_540311_839829468(p0, dest0, src0, flags0); } break; case ((Ttypekind294244) 24): { { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag540302) 0))&7U)))!=0)); if (!(LOC12)) goto LA13; LOC12 = !((src0.s == ((Tstorageloc294812) 1))); LA13: ; if (!LOC12) goto LA14; genrefassign_540311_839829468(p0, dest0, src0, flags0); } goto LA10; LA14: ; { TY537238 LOC17; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_540204_839829468(dest0); LOC17[1] = rdloc_540188_839829468(src0); LOC17[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t); linefmt_534714_839829468(p0, 
((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3); } LA10: ; } break; case ((Ttypekind294244) 28): { { NIM_BOOL LOC21; LOC21 = (NIM_BOOL)0; LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag540302) 0))&7U)))!=0)); if (!(LOC21)) goto LA22; LOC21 = !((src0.s == ((Tstorageloc294812) 1))); LA22: ; if (!LOC21) goto LA23; genrefassign_540311_839829468(p0, dest0, src0, flags0); } goto LA19; LA23: ; { { NIM_BOOL LOC28; NIM_BOOL LOC30; TY534811 LOC33; LOC28 = (NIM_BOOL)0; LOC28 = (dest0.s == ((Tstorageloc294812) 2)); if (LOC28) goto LA29; LOC30 = (NIM_BOOL)0; LOC30 = usesnativegc_171177_2607990831(); LOC28 = !(LOC30); LA29: ; if (!LOC28) goto LA31; memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rdloc_540188_839829468(dest0); LOC33[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2); } goto LA26; LA31: ; { Tloc294816 tmp0; TY537238 LOC37; TY180507 LOC38; if (!(dest0.s == ((Tstorageloc294812) 3))) goto LA35; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_539032_839829468(p0, ty0, (&tmp0), NIM_FALSE); memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = rdloc_540188_839829468(dest0); LOC37[1] = rdloc_540188_839829468(src0); LOC37[2] = rdloc_540188_839829468(tmp0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3); memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = rdloc_540188_839829468(tmp0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1); } goto LA26; LA35: ; { TY534811 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = addrloc_540204_839829468(dest0); LOC40[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2); } LA26: ; } LA19: ; } break; case ((Ttypekind294244) 25): { { NIM_BOOL LOC44; Tloc294816 a0; Ropeobj180006* LOC47; Tloc294816 LOC48; Tloc294816 b0; Ropeobj180006* 
LOC49; Tloc294816 LOC50; TY534811 LOC51; LOC44 = (NIM_BOOL)0; LOC44 = needscomplexassignment_535509_839829468(dest0.t); if (!LOC44) goto LA45; memset((void*)(&a0), 0, sizeof(a0)); LOC47 = (Ropeobj180006*)0; LOC47 = rope_180277_2381377266(((NimStringDesc*) &T839829468_258)); memset((void*)(&LOC48), 0, sizeof(LOC48)); optasgnloc_551788_839829468(dest0, dest0.t, LOC47, (&LOC48)); memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); LOC49 = (Ropeobj180006*)0; LOC49 = rope_180277_2381377266(((NimStringDesc*) &T839829468_258)); memset((void*)(&LOC50), 0, sizeof(LOC50)); optasgnloc_551788_839829468(src0, dest0.t, LOC49, (&LOC50)); memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0)); genrefassign_540311_839829468(p0, a0, b0, flags0); memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = rdloc_540188_839829468(dest0); LOC51[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2); } goto LA42; LA45: ; { TY534811 LOC53; memset((void*)LOC53, 0, sizeof(LOC53)); LOC53[0] = rdloc_540188_839829468(dest0); LOC53[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2); } LA42: ; } break; case ((Ttypekind294244) 18): { { NIM_BOOL LOC57; LOC57 = (NIM_BOOL)0; LOC57 = needscomplexassignment_535509_839829468(dest0.t); if (!LOC57) goto LA58; { NI LOC62; LOC62 = (NI)0; LOC62 = len_297339_850551059(dest0.t); if (!(LOC62 <= ((NI) 4))) goto LA63; genoptasgntuple_552001_839829468(p0, dest0, src0, flags0); } goto LA60; LA63: ; { gengenericasgn_552167_839829468(p0, dest0, src0, flags0); } LA60: ; } goto LA55; LA58: ; { TY534811 LOC67; memset((void*)LOC67, 0, sizeof(LOC67)); LOC67[0] = rdloc_540188_839829468(dest0); LOC67[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2); } LA55: ; } break; case 
((Ttypekind294244) 17): { { NIM_BOOL LOC71; TY534811 LOC74; LOC71 = (NIM_BOOL)0; LOC71 = isimportedcpptype_535476_839829468(ty0); if (!LOC71) goto LA72; memset((void*)LOC74, 0, sizeof(LOC74)); LOC74[0] = rdloc_540188_839829468(dest0); LOC74[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2); } goto LA69; LA72: ; { NIM_BOOL LOC76; LOC76 = (NIM_BOOL)0; LOC76 = isobjlackingtypefield_535513_839829468(ty0); if (!!(LOC76)) goto LA77; gengenericasgn_552167_839829468(p0, dest0, src0, flags0); } goto LA69; LA77: ; { NIM_BOOL LOC80; LOC80 = (NIM_BOOL)0; LOC80 = needscomplexassignment_535509_839829468(ty0); if (!LOC80) goto LA81; { NIM_BOOL LOC85; NI LOC87; Ropeobj180006* LOC90; LOC85 = (NIM_BOOL)0; LOC85 = (*ty0).sons->data[((NI) 0)] == 0; if (!(LOC85)) goto LA86; LOC87 = (NI)0; LOC87 = asgncomplexity_551750_839829468((*ty0).n); LOC85 = (LOC87 <= ((NI) 4)); LA86: ; if (!LOC85) goto LA88; LOC90 = (Ropeobj180006*)0; LOC90 = gettypedesc_537671_839829468((*p0).module, ty0); ty0 = getuniquetype_530640_2036603609(ty0); { NimStringDesc* LOC95; if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93; LOC95 = (NimStringDesc*)0; LOC95 = HEX24_198185_1689653243(T839829468_264); internalerror_198113_155036129(LOC95); } LA93: ; genoptasgnobject_552084_839829468(p0, dest0, src0, flags0, (*ty0).n); } goto LA83; LA88: ; { gengenericasgn_552167_839829468(p0, dest0, src0, flags0); } LA83: ; } goto LA69; LA81: ; { TY534811 LOC98; memset((void*)LOC98, 0, sizeof(LOC98)); LOC98[0] = rdloc_540188_839829468(dest0); LOC98[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2); } LA69: ; } break; case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): { { NIM_BOOL LOC102; LOC102 = (NIM_BOOL)0; LOC102 = needscomplexassignment_535509_839829468(dest0.t); if (!LOC102) goto LA103; gengenericasgn_552167_839829468(p0, dest0, src0, flags0); } goto LA100; 
LA103: ; { TY537238 LOC106; usestringh_534345_839829468((*p0).module); memset((void*)LOC106, 0, sizeof(LOC106)); LOC106[0] = rdloc_540188_839829468(dest0); LOC106[1] = rdloc_540188_839829468(src0); LOC106[2] = gettypedesc_537671_839829468((*p0).module, ty0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3); } LA100: ; } break; case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): { { NIM_BOOL LOC110; TY537238 LOC113; LOC110 = (NIM_BOOL)0; LOC110 = needscomplexassignment_535509_839829468(dest0.t); if (!LOC110) goto LA111; memset((void*)LOC113, 0, sizeof(LOC113)); LOC113[0] = addrloc_540204_839829468(dest0); LOC113[1] = addrloc_540204_839829468(src0); LOC113[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3); } goto LA108; LA111: ; { TY534811 LOC115; usestringh_534345_839829468((*p0).module); memset((void*)LOC115, 0, sizeof(LOC115)); LOC115[0] = rdloc_540188_839829468(dest0); LOC115[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2); } LA108: ; } break; case ((Ttypekind294244) 19): { { Tctypekind531007 LOC119; TY537238 LOC122; NI64 LOC123; LOC119 = (Tctypekind531007)0; LOC119 = maptype_535393_839829468(ty0); if (!(LOC119 == ((Tctypekind531007) 17))) goto LA120; usestringh_534345_839829468((*p0).module); memset((void*)LOC122, 0, sizeof(LOC122)); LOC122[0] = rdloc_540188_839829468(dest0); LOC122[1] = rdloc_540188_839829468(src0); LOC123 = (NI64)0; LOC123 = getsize_322135_3876443242(dest0.t); LOC122[2] = rope_180401_2381377266(LOC123); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3); } goto LA117; LA120: ; { TY534811 LOC125; memset((void*)LOC125, 0, sizeof(LOC125)); LOC125[0] = rdloc_540188_839829468(dest0); LOC125[1] = rdloc_540188_839829468(src0); 
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2); } LA117: ; } break; case ((Ttypekind294244) 21): case ((Ttypekind294244) 26): case ((Ttypekind294244) 2): case ((Ttypekind294244) 1): case ((Ttypekind294244) 14): case ((Ttypekind294244) 29): case ((Ttypekind294244) 31) ... ((Ttypekind294244) 44): case ((Ttypekind294244) 20): case ((Ttypekind294244) 23): { TY534811 LOC127; memset((void*)LOC127, 0, sizeof(LOC127)); LOC127[0] = rdloc_540188_839829468(dest0); LOC127[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2); } break; default: { NimStringDesc* LOC129; LOC129 = (NimStringDesc*)0; LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI294244))->Sup.len + 15); appendString(LOC129, ((NimStringDesc*) &T839829468_269)); appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI294244))); internalerror_198113_155036129(LOC129); } break; } }BeforeRet: ; } N_NIMCALL(void, putlocintodest_541258_839829468)(Tcproc531021* p0, Tloc294816* d0, Tloc294816 s0) { { if (!!(((*d0).k == ((Tlockind294808) 0)))) goto LA3; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag294810) 2))&15U)))!=0)) goto LA7; genassignment_541264_839829468(p0, (*d0), s0, 0); } goto LA5; LA7: ; { genassignment_541264_839829468(p0, (*d0), s0, 1); } LA5: ; } goto LA1; LA3: ; { genericAssign((void*)(&(*d0)), (void*)(&s0), (&NTI294816)); } LA1: ; } N_NIMCALL(NIM_BOOL, issimpleconst_534311_839829468)(Ttype294840* typ0) { NIM_BOOL result0; Ttype294840* t0; NIM_BOOL LOC1; NIM_BOOL LOC3; result0 = (NIM_BOOL)0; t0 = skiptypes_298099_850551059(typ0, IL64(211106240964864)); LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).kind == ((Ttypekind294244) 18) || (*t0).kind == ((Ttypekind294244) 17) || (*t0).kind == ((Ttypekind294244) 16) || (*t0).kind == ((Ttypekind294244) 4) || (*t0).kind == ((Ttypekind294244) 19) || (*t0).kind == ((Ttypekind294244) 24))); if (!(LOC1)) goto LA2; LOC3 = (NIM_BOOL)0; LOC3 = 
((*t0).kind == ((Ttypekind294244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention294002) 8)); LA4: ; LOC1 = !(LOC3); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, putintodest_552468_839829468)(Tcproc531021* p0, Tloc294816* d0, Ttype294840* t0, Ropeobj180006* r0, Tstorageloc294812 s0) { Tloc294816 a0; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind294808) 0)))) goto LA3; initloc_534273_839829468((&a0), ((Tlockind294808) 6), t0, s0); a0.r = r0; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag294810) 2))&15U)))!=0)) goto LA7; genassignment_541264_839829468(p0, (*d0), a0, 0); } goto LA5; LA7: ; { genassignment_541264_839829468(p0, (*d0), a0, 1); } LA5: ; } goto LA1; LA3: ; { (*d0).k = ((Tlockind294808) 6); unsureAsgnRef((void**) (&(*d0).t), t0); unsureAsgnRef((void**) (&(*d0).r), r0); } LA1: ; } N_NIMCALL(NI64, bitsettoword_551578_839829468)(Tbitset341004* s0, NI size0) { NI64 result0; result0 = (NI64)0; result0 = IL64(0); { NI j_551612_839829468; NI HEX3Atmp_551622_839829468; NI res_551625_839829468; j_551612_839829468 = (NI)0; HEX3Atmp_551622_839829468 = (NI)0; HEX3Atmp_551622_839829468 = (NI)(size0 - ((NI) 1)); res_551625_839829468 = ((NI) 0); { while (1) { if (!(res_551625_839829468 <= HEX3Atmp_551622_839829468)) goto LA3; j_551612_839829468 = res_551625_839829468; { if (!(j_551612_839829468 < (s0 ? 
s0->Sup.len : 0))) goto LA6; result0 = (NI64)(result0 | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[j_551612_839829468]))) << (NU64)(((NI64) ((NI)(j_551612_839829468 * ((NI) 8))))))); } LA6: ; res_551625_839829468 += ((NI) 1); } LA3: ; } } return result0; } N_NIMCALL(Ropeobj180006*, genrawsetdata_551629_839829468)(Tbitset341004* cs0, NI size0) { Ropeobj180006* result0; NimStringDesc* frmt0; result0 = (Ropeobj180006*)0; frmt0 = (NimStringDesc*)0; { TY535289 LOC5; if (!(((NI) 8) < size0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_273), LOC5, 0); { NI i_551649_839829468; NI HEX3Atmp_551657_839829468; NI res_551660_839829468; i_551649_839829468 = (NI)0; HEX3Atmp_551657_839829468 = (NI)0; HEX3Atmp_551657_839829468 = (NI)(size0 - ((NI) 1)); res_551660_839829468 = ((NI) 0); { while (1) { TY180507 LOC19; NimStringDesc* LOC20; if (!(res_551660_839829468 <= HEX3Atmp_551657_839829468)) goto LA8; i_551649_839829468 = res_551660_839829468; { if (!(i_551649_839829468 < (NI)(size0 - ((NI) 1)))) goto LA11; { if (!(((NI) ((NI)((NI)(i_551649_839829468 + ((NI) 1)) % ((NI) 8)))) == ((NI) 0))) goto LA15; frmt0 = copyString(((NimStringDesc*) &T839829468_274)); } goto LA13; LA15: ; { frmt0 = copyString(((NimStringDesc*) &T839829468_275)); } LA13: ; } goto LA9; LA11: ; { frmt0 = copyString(((NimStringDesc*) &T839829468_276)); } LA9: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (NimStringDesc*)0; LOC20 = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i_551649_839829468])), ((NI) 2)); LOC19[0] = rope_180277_2381377266(LOC20); addf_181205_2381377266(&result0, frmt0, LOC19, 1); res_551660_839829468 += ((NI) 1); } LA8: ; } } } goto LA1; LA3: ; { NI64 LOC22; LOC22 = (NI64)0; LOC22 = bitsettoword_551578_839829468(cs0, size0); result0 = intliteral_541270_839829468(LOC22); } LA1: ; return result0; } N_NIMCALL(void, appcg_534640_839829468)(Tcgen531027* m0, Tcfilesection531005 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI 
args0Len0) { Ropeobj180006* LOC1; LOC1 = (Ropeobj180006*)0; LOC1 = ropecg_534407_839829468(m0, frmt0, args0, args0Len0); add_180482_2381377266(&(*m0).s[(s0)- 0], LOC1); } N_NIMCALL(Ropeobj180006*, genconstseq_561371_839829468)(Tcproc531021* p0, Tnode294802* n0, Ttype294840* t0) { Ropeobj180006* result0; Ropeobj180006* data0; TY180507 LOC1; NI LOC2; TY537235 LOC18; NI LOC19; TY534811 LOC20; result0 = (Ropeobj180006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = len_295081_850551059(n0); LOC1[0] = rope_180401_2381377266(((NI64) (LOC2))); data0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_277), LOC1, 1); { NI LOC5; LOC5 = (NI)0; LOC5 = len_295081_850551059(n0); if (!(((NI) 0) < LOC5)) goto LA6; add_180487_2381377266(&data0, ((NimStringDesc*) &T839829468_278)); { NI i_561395_839829468; NI HEX3Atmp_561411_839829468; NI LOC9; NI res_561414_839829468; i_561395_839829468 = (NI)0; HEX3Atmp_561411_839829468 = (NI)0; LOC9 = (NI)0; LOC9 = len_295081_850551059(n0); HEX3Atmp_561411_839829468 = (NI)(LOC9 - ((NI) 1)); res_561414_839829468 = ((NI) 0); { while (1) { Ropeobj180006* LOC17; if (!(res_561414_839829468 <= HEX3Atmp_561411_839829468)) goto LA11; i_561395_839829468 = res_561414_839829468; { TY535289 LOC16; if (!(((NI) 0) < i_561395_839829468)) goto LA14; memset((void*)LOC16, 0, sizeof(LOC16)); addf_181205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), LOC16, 0); } LA14: ; LOC17 = (Ropeobj180006*)0; LOC17 = genconstexpr_556849_839829468(p0, (*n0).kindU.S6.sons->data[i_561395_839829468]); add_180482_2381377266(&data0, LOC17); res_561414_839829468 += ((NI) 1); } LA11: ; } } add_180487_2381377266(&data0, ((NimStringDesc*) &T839829468_280)); } LA6: ; add_180487_2381377266(&data0, ((NimStringDesc*) &T839829468_280)); result0 = gettempname_535596_839829468((*p0).module); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = gettypedesc_537671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]); LOC19 = (NI)0; LOC19 = len_295081_850551059(n0); 
LOC18[1] = rope_180401_2381377266(((NI64) (LOC19))); LOC18[2] = result0; LOC18[3] = data0; appcg_534640_839829468((*p0).module, ((Tcfilesection531005) 8), ((NimStringDesc*) &T839829468_281), LOC18, 4); memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = gettypedesc_537671_839829468((*p0).module, t0); LOC20[1] = result0; result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_282), LOC20, 2); return result0; } N_NIMCALL(Ropeobj180006*, gennamedconstexpr_561284_839829468)(Tcproc531021* p0, Tnode294802* n0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { if (!((*n0).kind == ((Tnodekind294020) 34))) goto LA3; result0 = genconstexpr_556849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]); } goto LA1; LA3: ; { result0 = genconstexpr_556849_839829468(p0, n0); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, genconstsimplelist_561299_839829468)(Tcproc531021* p0, Tnode294802* n0) { Ropeobj180006* result0; NI length0; TY535289 LOC10; result0 = (Ropeobj180006*)0; length0 = sonslen_297351_850551059(n0); result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_223)); { NI i_561333_839829468; NI HEX3Atmp_561362_839829468; NI HEX3Atmp_561363_839829468; NI res_561366_839829468; i_561333_839829468 = (NI)0; HEX3Atmp_561362_839829468 = (NI)0; HEX3Atmp_561363_839829468 = (NI)0; HEX3Atmp_561362_839829468 = ((*n0).kind == ((Tnodekind294020) 38)); HEX3Atmp_561363_839829468 = (NI)(length0 - ((NI) 2)); res_561366_839829468 = ((NI) (HEX3Atmp_561362_839829468)); { while (1) { TY180507 LOC4; if (!(res_561366_839829468 <= HEX3Atmp_561363_839829468)) goto LA3; i_561333_839829468 = res_561366_839829468; memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = gennamedconstexpr_561284_839829468(p0, (*n0).kindU.S6.sons->data[i_561333_839829468]); addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), LOC4, 1); res_561366_839829468 += ((NI) 1); } LA3: ; } } { Ropeobj180006* LOC9; if (!(((NI) (((*n0).kind == ((Tnodekind294020) 38)))) < length0)) goto LA7; LOC9 = 
(Ropeobj180006*)0; LOC9 = gennamedconstexpr_561284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]); add_180482_2381377266(&result0, LOC9); } LA7: ; memset((void*)LOC10, 0, sizeof(LOC10)); addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), LOC10, 0); return result0; } N_NIMCALL(Ropeobj180006*, genconstexpr_556849_839829468)(Tcproc531021* p0, Tnode294802* n0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; switch ((*n0).kind) { case ((Tnodekind294020) 58): case ((Tnodekind294020) 59): { result0 = genconstexpr_556849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]); } break; case ((Tnodekind294020) 39): { Tbitset341004* cs0; NI64 LOC3; cs0 = (Tbitset341004*)0; tobitset_342001_452470228(n0, (&cs0)); LOC3 = (NI64)0; LOC3 = getsize_322135_3876443242((*n0).typ); result0 = genrawsetdata_551629_839829468(cs0, ((NI) (LOC3))); } break; case ((Tnodekind294020) 41): case ((Tnodekind294020) 37): case ((Tnodekind294020) 155): case ((Tnodekind294020) 38): { Ttype294840* t0; t0 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256)); { if (!((*t0).kind == ((Ttypekind294244) 24))) goto LA7; result0 = genconstseq_561371_839829468(p0, n0, t0); } goto LA5; LA7: ; { result0 = genconstsimplelist_561299_839829468(p0, n0); } LA5: ; } break; default: { Tloc294816 d0; memset((void*)(&d0), 0, sizeof(d0)); initlocexpr_541283_839829468(p0, n0, (&d0)); result0 = rdloc_540188_839829468(d0); } break; } return result0; } N_NIMCALL(void, requestconstimpl_541240_839829468)(Tcproc531021* p0, Tsym294834* sym0) { Tcgen531027* m0; Tcgen531027* q0; { m0 = (*p0).module; useheader_534369_839829468(m0, sym0); { Ropeobj180006* LOC5; if (!((*sym0).loc.k == ((Tlockind294808) 0))) goto LA3; LOC5 = (Ropeobj180006*)0; LOC5 = manglename_535205_839829468(sym0); fillloc_534282_839829468((&(*sym0).loc), ((Tlockind294808) 8), (*sym0).typ, LOC5, ((Tstorageloc294812) 1)); } LA3: ; { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)) goto LA8; goto 
BeforeRet; } LA8: ; q0 = findpendingmodule_534241_839829468(m0, sym0); { NIM_BOOL LOC12; NIM_BOOL LOC14; TY537238 LOC17; LOC12 = (NIM_BOOL)0; LOC12 = !((q0 == NIM_NIL)); if (!(LOC12)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_270862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id); LOC12 = !(LOC14); LA13: ; if (!LOC12) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_537671_839829468(q0, (*sym0).typ); LOC17[1] = (*sym0).loc.r; LOC17[2] = genconstexpr_556849_839829468((*q0).initproc, (*sym0).ast); addf_181205_2381377266(&(*q0).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3); } LA15: ; { NIM_BOOL LOC20; NIM_BOOL LOC22; Ropeobj180006* headerdecl0; TY534811 LOC25; LOC20 = (NIM_BOOL)0; LOC20 = !((q0 == m0)); if (!(LOC20)) goto LA21; LOC22 = (NIM_BOOL)0; LOC22 = containsorincl_270862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LOC20 = !(LOC22); LA21: ; if (!LOC20) goto LA23; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = gettypedesc_537671_839829468(m0, (*sym0).loc.t); LOC25[1] = (*sym0).loc.r; headerdecl0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_284), LOC25, 2); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 8))- 0], headerdecl0); { NIM_BOOL LOC28; LOC28 = (NIM_BOOL)0; LOC28 = (((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 6))&31U)))!=0); if (!(LOC28)) goto LA29; LOC28 = !((generatedheader_534201_839829468 == NIM_NIL)); LA29: ; if (!LOC28) goto LA30; add_180482_2381377266(&(*generatedheader_534201_839829468).s[(((Tcfilesection531005) 8))- 0], headerdecl0); } LA30: ; } LA23: ; }BeforeRet: ; } N_NIMCALL(void, gencomplexconst_560249_839829468)(Tcproc531021* p0, Tsym294834* sym0, Tloc294816* d0) { requestconstimpl_541240_839829468(p0, sym0); putlocintodest_541258_839829468(p0, d0, (*sym0).loc); } static N_INLINE(Ropeobj180006**, procsec_531194_3723162438)(Tcproc531021* p0, Tcprocsection531011 s0) { Ropeobj180006** result0; result0 = (Ropeobj180006**)0; result0 = 
&(*p0).blocks->data[((NI) 0)].sections[(s0)- 0]; return result0; } N_NIMCALL(void, accessthreadlocalvar_534945_839829468)(Tcproc531021* p0, Tsym294834* s0) { { NIM_BOOL LOC3; Ropeobj180006** LOC7; TY535289 LOC8; Ropeobj180006** LOC9; TY535289 LOC10; Ropeobj180006* LOC11; LOC3 = (NIM_BOOL)0; LOC3 = emulatedthreadvars_534949_839829468(); if (!(LOC3)) goto LA4; LOC3 = !((*p0).threadvaraccessed); LA4: ; if (!LOC3) goto LA5; (*p0).threadvaraccessed = NIM_TRUE; (*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag531025) 1))%(sizeof(NU8)*8)); LOC7 = (Ropeobj180006**)0; LOC7 = procsec_531194_3723162438(p0, ((Tcprocsection531011) 0)); memset((void*)LOC8, 0, sizeof(LOC8)); addf_181205_2381377266(LOC7, ((NimStringDesc*) &T839829468_286), LOC8, 0); LOC9 = (Ropeobj180006**)0; LOC9 = procsec_531194_3723162438(p0, ((Tcprocsection531011) 1)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC11 = (Ropeobj180006*)0; LOC11 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), LOC10, 0); add_180482_2381377266(LOC9, LOC11); } LA5: ; } static N_INLINE(NIM_BOOL, isemptytype_299440_850551059)(Ttype294840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = (t0 == NIM_NIL); if (LOC1) goto LA2; LOC1 = ((*t0).kind == ((Ttypekind294244) 62) || (*t0).kind == ((Ttypekind294244) 7)); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, putdataintodest_552436_839829468)(Tcproc531021* p0, Tloc294816* d0, Ttype294840* t0, Ropeobj180006* r0) { Tloc294816 a0; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind294808) 0)))) goto LA3; initloc_534273_839829468((&a0), ((Tlockind294808) 8), t0, ((Tstorageloc294812) 1)); a0.r = r0; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag294810) 2))&15U)))!=0)) goto LA7; genassignment_541264_839829468(p0, (*d0), a0, 0); } goto LA5; LA7: ; { genassignment_541264_839829468(p0, (*d0), a0, 1); } LA5: ; } goto LA1; LA3: ; { (*d0).k = ((Tlockind294808) 8); unsureAsgnRef((void**) (&(*d0).t), t0); 
unsureAsgnRef((void**) (&(*d0).r), r0); } LA1: ; } N_NIMCALL(NIM_BOOL, freshlineinfo_534818_839829468)(Tcproc531021* p0, Tlineinfo193336 info0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*p0).lastlineinfo.line == info0.line)); if (LOC3) goto LA4; LOC3 = !(((*p0).lastlineinfo.fileindex == info0.fileindex)); LA4: ; if (!LOC3) goto LA5; (*p0).lastlineinfo.line = info0.line; (*p0).lastlineinfo.fileindex = info0.fileindex; result0 = NIM_TRUE; } LA5: ; return result0; } N_NIMCALL(void, genlinedir_534823_839829468)(Tcproc531021* p0, Tnode294802* t0) { NI line0; Ropeobj180006** LOC11; NimStringDesc* LOC12; line0 = safelinenm_534721_839829468((*t0).info); { Ropeobj180006** LOC5; TY535289 LOC6; Ropeobj180006* LOC7; Ropeobj180006* LOC8; Ropeobj180006* LOC9; Ropeobj180006* LOC10; if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 28))&63U)))!=0)) goto LA3; LOC5 = (Ropeobj180006**)0; LOC5 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); memset((void*)LOC6, 0, sizeof(LOC6)); LOC7 = (Ropeobj180006*)0; LOC7 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0); LOC8 = (Ropeobj180006*)0; LOC8 = sourceline_194068_155036129((*t0).info); LOC9 = (Ropeobj180006*)0; LOC9 = HEX26_180418_2381377266(LOC7, LOC8); LOC10 = (Ropeobj180006*)0; LOC10 = HEX26_180418_2381377266(LOC9, rnl_180903_2381377266); add_180482_2381377266(LOC5, LOC10); } LA3: ; LOC11 = (Ropeobj180006**)0; LOC11 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); LOC12 = (NimStringDesc*)0; LOC12 = tofullpath_194264_155036129((*t0).info.fileindex); genclinedir_534725_839829468(LOC11, LOC12, line0); { NIM_BOOL LOC15; NIM_BOOL LOC17; LOC15 = (NIM_BOOL)0; LOC15 = ((163840 & (*p0).options) == 163840); if (!(LOC15)) goto LA16; LOC17 = (NIM_BOOL)0; LOC17 = ((*p0).prc == NIM_NIL); if (LOC17) goto LA18; LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0)); LA18: ; LOC15 = LOC17; LA16: ; if (!LOC15) goto LA19; { 
NIM_BOOL LOC23; TY534811 LOC26; NimStringDesc* LOC27; LOC23 = (NIM_BOOL)0; LOC23 = freshlineinfo_534818_839829468(p0, (*t0).info); if (!LOC23) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rope_180401_2381377266(((NI64) (line0))); LOC27 = (NimStringDesc*)0; LOC27 = tofilename_194260_155036129((*t0).info.fileindex); LOC26[1] = makecstring_193638_155036129(LOC27); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2); } LA24: ; } goto LA13; LA19: ; { NIM_BOOL LOC29; NIM_BOOL LOC30; NIM_BOOL LOC32; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((98304 & (*p0).options) == 98304); if (!(LOC30)) goto LA31; LOC32 = (NIM_BOOL)0; LOC32 = ((*p0).prc == NIM_NIL); if (LOC32) goto LA33; LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0)); LA33: ; LOC30 = LOC32; LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA34; LOC29 = (((NI32) 0) <= (*t0).info.fileindex); LA34: ; if (!LOC29) goto LA35; { NIM_BOOL LOC39; TY534811 LOC42; LOC39 = (NIM_BOOL)0; LOC39 = freshlineinfo_534818_839829468(p0, (*t0).info); if (!LOC39) goto LA40; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = rope_180401_2381377266(((NI64) (line0))); LOC42[1] = quotedfilename_198818_155036129((*t0).info); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2); } LA40: ; } goto LA13; LA35: ; LA13: ; } N_NIMCALL(Ropeobj180006*, getlabel_541217_839829468)(Tcproc531021* p0) { Ropeobj180006* result0; Ropeobj180006* LOC1; result0 = (Ropeobj180006*)0; (*p0).labels += ((NI) 1); LOC1 = (Ropeobj180006*)0; LOC1 = rope_180401_2381377266(((NI64) ((*p0).labels))); result0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_296), LOC1); return result0; } N_NIMCALL(void, fixlabel_541230_839829468)(Tcproc531021* p0, Ropeobj180006* labl0) { TY180507 LOC1; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = labl0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_299), 
LOC1, 1); } N_NIMCALL(void, genandor_556311_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0) { Ropeobj180006* L0; Tloc294816 tmp0; L0 = (Ropeobj180006*)0; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_539032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE); (*p0).splitdecls += ((NI) 1); expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); L0 = getlabel_541217_839829468(p0); { TY534811 LOC5; if (!(m0 == ((Tmagic294524) 127))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_540188_839829468(tmp0); LOC5[1] = L0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2); } goto LA1; LA3: ; { TY534811 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_540188_839829468(tmp0); LOC7[1] = L0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2); } LA1: ; expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0)); fixlabel_541230_839829468(p0, L0); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA10; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI294816)); } goto LA8; LA10: ; { genassignment_541264_839829468(p0, (*d0), tmp0, 0); } LA8: ; (*p0).splitdecls -= ((NI) 1); } N_NIMCALL(void, unaryarith_554646_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) { Tloc294816 a0; Ttype294840* t0; TY537238 LOC1; NI64 LOC2; Ropeobj180006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); t0 = (Ttype294840*)0; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468(a0); LOC2 = (NI64)0; LOC2 = getsize_322135_3876443242(t0); LOC1[1] = rope_180401_2381377266((NI64)(LOC2 * IL64(8))); LOC1[2] = getsimpletypedesc_535936_839829468((*p0).module, (*e0).typ); LOC3 = (Ropeobj180006*)0; LOC3 = 
HEX25_180905_2381377266(unarithtab_554653_839829468[(op0)- 99], LOC1, 3); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC3, ((Tstorageloc294812) 0)); } N_NIMCALL(void, unaryarithoverflow_553633_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0) { Tloc294816 a0; Ttype294840* t0; TY534811 LOC7; NI64 LOC8; Ropeobj180006* LOC9; memset((void*)(&a0), 0, sizeof(a0)); t0 = (Ttype294840*)0; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832)); { TY534811 LOC5; NI64 LOC6; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 5))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_540188_839829468(a0); LOC6 = (NI64)0; LOC6 = firstord_322001_3876443242(t0); LOC5[1] = intliteral_541270_839829468(LOC6); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_317), LOC5, 2); } LA3: ; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_540188_839829468(a0); LOC8 = (NI64)0; LOC8 = getsize_322135_3876443242(t0); LOC7[1] = rope_180401_2381377266((NI64)(LOC8 * IL64(8))); LOC9 = (Ropeobj180006*)0; LOC9 = HEX25_180905_2381377266(opr_553640_839829468[(m0)- 96], LOC7, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC9, ((Tstorageloc294812) 0)); } N_NIMCALL(void, binaryarith_553819_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) { Tloc294816 a0; Tloc294816 b0; NI64 s0; NI64 LOC1; NI64 LOC2; TY537235 LOC3; Ropeobj180006* LOC4; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); s0 = (NI64)0; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); LOC1 = (NI64)0; LOC1 = getsize_322135_3876443242(a0.t); LOC2 = (NI64)0; LOC2 = getsize_322135_3876443242(b0.t); s0 = (NI64)(((LOC1 >= LOC2) ? 
LOC1 : LOC2) * IL64(8)); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = rdloc_540188_839829468(a0); LOC3[1] = rdloc_540188_839829468(b0); LOC3[2] = rope_180401_2381377266(s0); LOC3[3] = getsimpletypedesc_535936_839829468((*p0).module, (*e0).typ); LOC4 = (Ropeobj180006*)0; LOC4 = HEX25_180905_2381377266(binarithtab_553826_839829468[(op0)- 52], LOC3, 4); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC4, ((Tstorageloc294812) 0)); } N_NIMCALL(void, binaryfloatarith_558728_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0) { { Tloc294816 a0; Tloc294816 b0; TY537235 LOC5; Tnode294802* LOC6; Ropeobj180006* LOC7; if (!!(((384 & (*p0).options) == 0))) goto LA3; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_180277_2381377266(opr_558762_839829468[(m0)- 52]); LOC5[1] = rdloc_540188_839829468(a0); LOC5[2] = rdloc_540188_839829468(b0); LOC6 = (Tnode294802*)0; LOC6 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1)); LOC5[3] = getsimpletypedesc_535936_839829468((*p0).module, (*LOC6).typ); LOC7 = (Ropeobj180006*)0; LOC7 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc294812) 0)); { TY180507 LOC12; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 7))&31U)))!=0)) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_540188_839829468((*d0)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1); } LA10: ; { TY180507 LOC17; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 8))&31U)))!=0)) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_540188_839829468((*d0)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) 
&T839829468_324), LOC17, 1); } LA15: ; } goto LA1; LA3: ; { binaryarith_553819_839829468(p0, e0, d0, m0); } LA1: ; } N_NIMCALL(void, geneqproc_554214_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { Ttype294840* LOC3; TY534811 LOC6; Ropeobj180006* LOC7; LOC3 = (Ttype294840*)0; LOC3 = skiptypes_298099_850551059(a0.t, IL64(211106232576256)); if (!((*LOC3).callconv == ((Tcallingconvention294002) 8))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rdloc_540188_839829468(a0); LOC6[1] = rdloc_540188_839829468(b0); LOC7 = (Ropeobj180006*)0; LOC7 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_352), LOC6, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc294812) 0)); } goto LA1; LA4: ; { TY534811 LOC9; Ropeobj180006* LOC10; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = rdloc_540188_839829468(a0); LOC9[1] = rdloc_540188_839829468(b0); LOC10 = (Ropeobj180006*)0; LOC10 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_341), LOC9, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC10, ((Tstorageloc294812) 0)); } LA1: ; } N_NIMCALL(Ropeobj180006*, rdcharloc_540227_839829468)(Tloc294816 a0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = rdloc_540188_839829468(a0); { Ttype294840* LOC3; TY180507 LOC6; LOC3 = (Ttype294840*)0; LOC3 = skiptypes_298099_850551059(a0.t, IL64(211106233624832)); if (!((*LOC3).kind == ((Ttypekind294244) 2))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = result0; result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_358), LOC6, 1); } LA4: ; return result0; } N_NIMCALL(Ropeobj180006*, binaryarithoverflowraw_553235_839829468)(Tcproc531021* p0, Ttype294840* t0, Tloc294816 a0, Tloc294816 b0, 
NimStringDesc* frmt0) { Ropeobj180006* result0; NI64 size0; Ropeobj180006* storage0; TY534811 LOC6; TY537238 LOC7; result0 = (Ropeobj180006*)0; size0 = getsize_322135_3876443242(t0); { if (!(size0 < ((NI64) (intsize_178641_4151366050)))) goto LA3; storage0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_36)); } goto LA1; LA3: ; { storage0 = gettypedesc_537671_839829468((*p0).module, t0); } LA1: ; result0 = gettempname_535596_839829468((*p0).module); memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = storage0; LOC6[1] = result0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = result0; LOC7[1] = rdcharloc_540227_839829468(a0); LOC7[2] = rdcharloc_540227_839829468(b0); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), frmt0, LOC7, 3); { NIM_BOOL LOC10; TY537238 LOC14; NI64 LOC15; NI64 LOC16; LOC10 = (NIM_BOOL)0; LOC10 = (size0 < ((NI64) (intsize_178641_4151366050))); if (LOC10) goto LA11; LOC10 = ((*t0).kind == ((Ttypekind294244) 20) || (*t0).kind == ((Ttypekind294244) 14)); LA11: ; if (!LOC10) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = result0; LOC15 = (NI64)0; LOC15 = firstord_322001_3876443242(t0); LOC14[1] = intliteral_541270_839829468(LOC15); LOC16 = (NI64)0; LOC16 = lastord_322004_3876443242(t0); LOC14[2] = intliteral_541270_839829468(LOC16); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3); } LA12: ; return result0; } N_NIMCALL(void, binaryarithoverflow_553262_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0) { Tloc294816 a0; Tloc294816 b0; Ttype294840* t0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); t0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832)); { 
Ropeobj180006* res0; TY537238 LOC5; if (!!((((*p0).options &(1U<<((NU)(((Toption171009) 5))&31U)))!=0))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = gettypedesc_537671_839829468((*p0).module, t0); LOC5[1] = rdloc_540188_839829468(a0); LOC5[2] = rdloc_540188_839829468(b0); res0 = HEX25_180905_2381377266(opr_553279_839829468[(m0)- 45], LOC5, 3); putintodest_552468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc294812) 0)); } goto LA1; LA3: ; { Ropeobj180006* res0; NimStringDesc* LOC7; TY534811 LOC13; Ropeobj180006* LOC14; LOC7 = (NimStringDesc*)0; { if (!((*t0).kind == ((Ttypekind294244) 35))) goto LA10; LOC7 = copyString(prc64_553274_839829468[(m0)- 45]); } goto LA8; LA10: ; { LOC7 = copyString(prc_553269_839829468[(m0)- 45]); } LA8: ; res0 = binaryarithoverflowraw_553235_839829468(p0, t0, a0, b0, LOC7); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = gettypedesc_537671_839829468((*p0).module, t0); LOC13[1] = res0; LOC14 = (Ropeobj180006*)0; LOC14 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc294812) 0)); } LA1: ; } N_NIMCALL(Ropeobj180006*, lenfield_541305_839829468)(Tcproc531021* p0) { Ropeobj180006* result0; NimStringDesc* LOC1; result0 = (Ropeobj180006*)0; LOC1 = (NimStringDesc*)0; { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC4) goto LA5; LOC4 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA5: ; if (!LOC4) goto LA6; LOC1 = copyString(((NimStringDesc*) &T839829468_157)); } goto LA2; LA6: ; { LOC1 = copyString(((NimStringDesc*) &T839829468_158)); } LA2: ; result0 = rope_180277_2381377266(LOC1); return result0; } N_NIMCALL(void, gcusage_556439_839829468)(Tnode294802* n0) { { NimStringDesc* LOC5; if (!(gselectedgc_171133_2607990831 == ((Tgcmode171080) 0))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = rendertree_313044_382274130(n0, 0); 
message_198095_155036129((*n0).info, ((Tmsgkind193002) 263), LOC5); } LA3: ; } N_NIMCALL(void, genrepr_557339_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Ttype294840* t0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); switch ((*t0).kind) { case ((Ttypekind294244) 31) ... ((Ttypekind294244) 35): case ((Ttypekind294244) 40) ... ((Ttypekind294244) 44): { TY180507 LOC2; Ropeobj180006* LOC3; memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_540188_839829468(a0); LOC3 = (Ropeobj180006*)0; LOC3 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC3, a0.s); } break; case ((Ttypekind294244) 36) ... ((Ttypekind294244) 39): { TY180507 LOC5; Ropeobj180006* LOC6; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_540188_839829468(a0); LOC6 = (Ropeobj180006*)0; LOC6 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC6, a0.s); } break; case ((Ttypekind294244) 1): { TY180507 LOC8; Ropeobj180006* LOC9; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_540188_839829468(a0); LOC9 = (Ropeobj180006*)0; LOC9 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC9, a0.s); } break; case ((Ttypekind294244) 2): { TY180507 LOC11; Ropeobj180006* LOC12; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdloc_540188_839829468(a0); LOC12 = (Ropeobj180006*)0; LOC12 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC12, a0.s); } break; case ((Ttypekind294244) 14): case ((Ttypekind294244) 15): { TY534811 LOC14; Ropeobj180006* LOC15; memset((void*)LOC14, 
0, sizeof(LOC14)); LOC14[0] = rdloc_540188_839829468(a0); LOC14[1] = gentypeinfo_537941_839829468((*p0).module, t0); LOC15 = (Ropeobj180006*)0; LOC15 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC15, a0.s); } break; case ((Ttypekind294244) 28): { TY180507 LOC17; Ropeobj180006* LOC18; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_540188_839829468(a0); LOC18 = (Ropeobj180006*)0; LOC18 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC18, a0.s); } break; case ((Ttypekind294244) 19): { TY534811 LOC20; Ropeobj180006* LOC21; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = addrloc_540204_839829468(a0); LOC20[1] = gentypeinfo_537941_839829468((*p0).module, t0); LOC21 = (Ropeobj180006*)0; LOC21 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC21, a0.s); } break; case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): { Tloc294816 b0; TY534811 LOC34; Ttype294840* LOC35; Ropeobj180006* LOC36; memset((void*)(&b0), 0, sizeof(b0)); switch ((*a0.t).kind) { case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): { TY180507 LOC24; Ropeobj180006* LOC25; memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = rdloc_540188_839829468(a0); LOC25 = (Ropeobj180006*)0; LOC25 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1); putintodest_552468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s); } break; case ((Ttypekind294244) 28): case ((Ttypekind294244) 24): { TY534811 LOC27; Ropeobj180006* LOC28; memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = rdloc_540188_839829468(a0); LOC27[1] = lenfield_541305_839829468(p0); LOC28 = (Ropeobj180006*)0; LOC28 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2); putintodest_552468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s); } break; 
case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): { TY534811 LOC30; NI64 LOC31; Ropeobj180006* LOC32; memset((void*)LOC30, 0, sizeof(LOC30)); LOC30[0] = rdloc_540188_839829468(a0); LOC31 = (NI64)0; LOC31 = lengthord_322007_3876443242(a0.t); LOC30[1] = rope_180401_2381377266(LOC31); LOC32 = (Ropeobj180006*)0; LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2); putintodest_552468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s); } break; default: { internalerror_198100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381)); } break; } memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = rdloc_540188_839829468(b0); LOC35 = (Ttype294840*)0; LOC35 = elemtype_322394_3876443242(t0); LOC34[1] = gentypeinfo_537941_839829468((*p0).module, LOC35); LOC36 = (Ropeobj180006*)0; LOC36 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC36, a0.s); } break; case ((Ttypekind294244) 29): case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): case ((Ttypekind294244) 22): case ((Ttypekind294244) 21): case ((Ttypekind294244) 26): case ((Ttypekind294244) 5): case ((Ttypekind294244) 24): { TY534811 LOC38; Ropeobj180006* LOC39; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = rdloc_540188_839829468(a0); LOC38[1] = gentypeinfo_537941_839829468((*p0).module, t0); LOC39 = (Ropeobj180006*)0; LOC39 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC39, a0.s); } break; case ((Ttypekind294244) 3): case ((Ttypekind294244) 62): { localerror_198085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384)); } break; default: { TY534811 LOC42; Ropeobj180006* LOC43; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = addrloc_540204_839829468(a0); LOC42[1] = gentypeinfo_537941_839829468((*p0).module, t0); LOC43 = (Ropeobj180006*)0; LOC43 = 
ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC43, a0.s); } break; } gcusage_556439_839829468(e0); } N_NIMCALL(void, gengettypeinfo_557383_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Ttype294840* t0; Ropeobj180006* LOC1; t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); LOC1 = (Ropeobj180006*)0; LOC1 = gentypeinfo_537941_839829468((*p0).module, t0); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC1, ((Tstorageloc294812) 0)); } N_NIMCALL(void, genswap_557638_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; Tloc294816 tmp0; Ttype294840* LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); LOC1 = (Ttype294840*)0; LOC1 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); gettemp_539032_839829468(p0, LOC1, (&tmp0), NIM_FALSE); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); genassignment_541264_839829468(p0, tmp0, a0, 0); genassignment_541264_839829468(p0, a0, b0, 0); genassignment_541264_839829468(p0, b0, tmp0, 0); } N_NIMCALL(void, unaryexpr_553209_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) { Tloc294816 a0; TY180507 LOC1; Ropeobj180006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468(a0); LOC2 = (Ropeobj180006*)0; LOC2 = ropecg_534407_839829468((*p0).module, frmt0, LOC1, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc294812) 0)); } N_NIMCALL(void, binarystmt_552501_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* 
frmt0) { Tloc294816 a0; Tloc294816 b0; TY534811 LOC5; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); { if (!!(((*d0).k == ((Tlockind294808) 0)))) goto LA3; internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387)); } LA3: ; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_540188_839829468(a0); LOC5[1] = rdloc_540188_839829468(b0); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), frmt0, LOC5, 2); } N_NIMCALL(void, genstrconcat_556452_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 tmp0; NI L0; Ropeobj180006* appends0; Ropeobj180006* lens0; TY537238 LOC21; Ropeobj180006** LOC22; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_539032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE); L0 = ((NI) 0); appends0 = NIM_NIL; lens0 = NIM_NIL; { NI i_556475_839829468; NI HEX3Atmp_556547_839829468; NI LOC2; NI res_556550_839829468; i_556475_839829468 = (NI)0; HEX3Atmp_556547_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(e0); HEX3Atmp_556547_839829468 = (NI)(LOC2 - ((NI) 2)); res_556550_839829468 = ((NI) 0); { while (1) { if (!(res_556550_839829468 <= HEX3Atmp_556547_839829468)) goto LA4; i_556475_839829468 = res_556550_839829468; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))], (&a0)); { Ttype294840* LOC7; TY534811 LOC10; Ropeobj180006* LOC11; LOC7 = (Ttype294840*)0; LOC7 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).typ, IL64(211106242013440)); if (!((*LOC7).kind == ((Ttypekind294244) 2))) goto LA8; L0 += ((NI) 1); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = tmp0.r; LOC10[1] = rdloc_540188_839829468(a0); LOC11 = (Ropeobj180006*)0; LOC11 = 
ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2); add_180482_2381377266(&appends0, LOC11); } goto LA5; LA8: ; { TY534811 LOC19; Ropeobj180006* LOC20; { if (!((*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).kind >= ((Tnodekind294020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).kind <= ((Tnodekind294020) 22))) goto LA15; L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0); } goto LA13; LA15: ; { TY534811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_540188_839829468(a0); LOC18[1] = lenfield_541305_839829468(p0); addf_181205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2); } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = tmp0.r; LOC19[1] = rdloc_540188_839829468(a0); LOC20 = (Ropeobj180006*)0; LOC20 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2); add_180482_2381377266(&appends0, LOC20); } LA5: ; res_556550_839829468 += ((NI) 1); } LA4: ; } } memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = tmp0.r; LOC21[1] = lens0; LOC21[2] = rope_180401_2381377266(((NI64) (L0))); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3); LOC22 = (Ropeobj180006**)0; LOC22 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180482_2381377266(LOC22, appends0); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA25; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI294816)); } goto LA23; LA25: ; { genassignment_541264_839829468(p0, (*d0), tmp0, 0); } LA23: ; gcusage_556439_839829468(e0); } N_NIMCALL(void, genstrappend_556554_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 dest0; Ropeobj180006* appends0; Ropeobj180006* lens0; NI L0; TY537238 LOC21; Ropeobj180006** LOC22; memset((void*)(&a0), 0, 
sizeof(a0)); memset((void*)(&dest0), 0, sizeof(dest0)); appends0 = (Ropeobj180006*)0; lens0 = (Ropeobj180006*)0; L0 = ((NI) 0); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0)); { NI i_556615_839829468; NI HEX3Atmp_556676_839829468; NI LOC2; NI res_556679_839829468; i_556615_839829468 = (NI)0; HEX3Atmp_556676_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(e0); HEX3Atmp_556676_839829468 = (NI)(LOC2 - ((NI) 3)); res_556679_839829468 = ((NI) 0); { while (1) { if (!(res_556679_839829468 <= HEX3Atmp_556676_839829468)) goto LA4; i_556615_839829468 = res_556679_839829468; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))], (&a0)); { Ttype294840* LOC7; TY534811 LOC10; Ropeobj180006* LOC11; LOC7 = (Ttype294840*)0; LOC7 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).typ, IL64(211106242013440)); if (!((*LOC7).kind == ((Ttypekind294244) 2))) goto LA8; L0 += ((NI) 1); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_540188_839829468(dest0); LOC10[1] = rdloc_540188_839829468(a0); LOC11 = (Ropeobj180006*)0; LOC11 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2); add_180482_2381377266(&appends0, LOC11); } goto LA5; LA8: ; { TY534811 LOC19; Ropeobj180006* LOC20; { if (!((*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).kind >= ((Tnodekind294020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).kind <= ((Tnodekind294020) 22))) goto LA15; L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).kindU.S3.strval ? 
(*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0); } goto LA13; LA15: ; { TY534811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_540188_839829468(a0); LOC18[1] = lenfield_541305_839829468(p0); addf_181205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2); } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_540188_839829468(dest0); LOC19[1] = rdloc_540188_839829468(a0); LOC20 = (Ropeobj180006*)0; LOC20 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2); add_180482_2381377266(&appends0, LOC20); } LA5: ; res_556679_839829468 += ((NI) 1); } LA4: ; } } memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdloc_540188_839829468(dest0); LOC21[1] = lens0; LOC21[2] = rope_180401_2381377266(((NI64) (L0))); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3); LOC22 = (Ropeobj180006**)0; LOC22 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180482_2381377266(LOC22, appends0); gcusage_556439_839829468(e0); } N_NIMCALL(void, genseqelemappend_556683_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { NimStringDesc* seqappendpattern0; Tloc294816 a0; Tloc294816 b0; Tloc294816 dest0; Ttype294840* bt0; TY537238 LOC8; Ttype294840* LOC9; TY534811 LOC10; TY534811 LOC11; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396)); } goto LA1; LA5: ; { seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397)); } LA1: ; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&dest0), 0, sizeof(dest0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); bt0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_540188_839829468(a0); LOC9 = (Ttype294840*)0; LOC9 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC8[1] = gettypedesc_537671_839829468((*p0).module, LOC9); LOC8[2] = gettypedesc_537671_839829468((*p0).module, bt0); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), seqappendpattern0, LOC8, 3); initloc_534273_839829468((&dest0), ((Tlockind294808) 6), bt0, ((Tstorageloc294812) 3)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_540188_839829468(a0); LOC10[1] = lenfield_541305_839829468(p0); dest0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2); genassignment_541264_839829468(p0, dest0, b0, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdloc_540188_839829468(a0); LOC11[1] = lenfield_541305_839829468(p0); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2); gcusage_556439_839829468(e0); } N_NIMCALL(void, binaryexpr_552549_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) { Tloc294816 a0; Tloc294816 b0; TY534811 LOC1; Ropeobj180006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468(a0); LOC1[1] = rdloc_540188_839829468(b0); LOC2 = (Ropeobj180006*)0; LOC2 = ropecg_534407_839829468((*p0).module, frmt0, LOC1, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc294812) 0)); } N_NIMCALL(void, genstrequals_558666_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 x0; Tnode294802* a0; Tnode294802* 
b0; memset((void*)(&x0), 0, sizeof(x0)); a0 = (*e0).kindU.S6.sons->data[((NI) 1)]; b0 = (*e0).kindU.S6.sons->data[((NI) 2)]; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*a0).kind == ((Tnodekind294020) 23)); if (LOC3) goto LA4; LOC3 = ((*b0).kind == ((Tnodekind294020) 23)); LA4: ; if (!LOC3) goto LA5; binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341)); } goto LA1; LA5: ; { NIM_BOOL LOC8; TY534811 LOC12; Ropeobj180006* LOC13; LOC8 = (NIM_BOOL)0; LOC8 = ((*a0).kind >= ((Tnodekind294020) 20) && (*a0).kind <= ((Tnodekind294020) 22)); if (!(LOC8)) goto LA9; LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0); LA9: ; if (!LOC8) goto LA10; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0)); memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_540188_839829468(x0); LOC12[1] = lenfield_541305_839829468(p0); LOC13 = (Ropeobj180006*)0; LOC13 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc294812) 0)); } goto LA1; LA10: ; { NIM_BOOL LOC15; TY534811 LOC19; Ropeobj180006* LOC20; LOC15 = (NIM_BOOL)0; LOC15 = ((*b0).kind >= ((Tnodekind294020) 20) && (*b0).kind <= ((Tnodekind294020) 22)); if (!(LOC15)) goto LA16; LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0); LA16: ; if (!LOC15) goto LA17; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0)); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_540188_839829468(x0); LOC19[1] = lenfield_541305_839829468(p0); LOC20 = (Ropeobj180006*)0; LOC20 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc294812) 0)); } goto LA1; LA17: ; { binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401)); } LA1: ; } N_NIMCALL(void, genisnil_554620_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) 
{ Ttype294840* t0; t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind294244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention294002) 8)); LA4: ; if (!LOC3) goto LA5; unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404)); } goto LA1; LA5: ; { unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405)); } LA1: ; } N_NIMCALL(void, gendollar_557391_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0, NimStringDesc* frmt0) { Tloc294816 a0; TY180507 LOC1; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468(a0); a0.r = ropecg_534407_839829468((*p0).module, frmt0, LOC1, 1); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA4; gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA4: ; genassignment_541264_839829468(p0, (*d0), a0, 0); gcusage_556439_839829468(n0); } N_NIMCALL(Ropeobj180006*, genofhelper_557139_839829468)(Tcproc531021* p0, Ttype294840* dest0, Ropeobj180006* a0) { Ropeobj180006* result0; Ropeobj180006* ti0; result0 = (Ropeobj180006*)0; ti0 = gentypeinfo_537941_839829468((*p0).module, dest0); { NIM_BOOL LOC3; NIM_BOOL LOC5; TY534811 LOC9; LOC3 = (NIM_BOOL)0; LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag294431) 2))&31U)))!=0); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag531025) 5))&7U)))!=0); if (!(LOC5)) goto LA6; LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag294431) 5))&31U)))!=0)); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = a0; LOC9[1] = ti0; result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2); } goto LA1; LA7: ; { Ropeobj180006* LOC11; Ropeobj180006* cache0; Ropeobj180006* LOC12; TY180507 LOC13; 
TY537238 LOC14; LOC11 = (Ropeobj180006*)0; LOC11 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129)); (*(*p0).module).labels += ((NI) 1); LOC12 = (Ropeobj180006*)0; LOC12 = rope_180401_2381377266(((NI64) ((*(*p0).module).labels))); cache0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_415), LOC12); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = cache0; addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = a0; LOC14[1] = ti0; LOC14[2] = cache0; result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3); } LA1: ; return result0; } N_NIMCALL(void, genof_557201_839829468)(Tcproc531021* p0, Tnode294802* x0, Ttype294840* typ0, Tloc294816* d0) { Tloc294816 a0; Ttype294840* dest0; Ropeobj180006* r0; Ropeobj180006* nilcheck0; Ttype294840* t0; Ttype294840* LOC41; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, x0, (&a0)); dest0 = skiptypes_298099_850551059(typ0, IL64(211106247256320)); r0 = rdloc_540188_839829468(a0); nilcheck0 = NIM_NIL; t0 = skiptypes_298099_850551059(a0.t, IL64(211106232576256)); { while (1) { Ttype294840* LOC16; if (!((*t0).kind == ((Ttypekind294244) 23) || (*t0).kind == ((Ttypekind294244) 21) || (*t0).kind == ((Ttypekind294244) 22))) goto LA2; { if (!!(((*t0).kind == ((Ttypekind294244) 23)))) goto LA5; nilcheck0 = r0; } LA5: ; { NIM_BOOL LOC9; NIM_BOOL LOC11; TY180507 LOC15; LOC9 = (NIM_BOOL)0; LOC9 = !(((*t0).kind == ((Ttypekind294244) 23))); if (LOC9) goto LA10; LOC11 = (NIM_BOOL)0; LOC11 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC11) goto LA12; LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA12: ; LOC9 = !(LOC11); LA10: ; if (!LOC9) goto LA13; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = r0; r0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1); } 
LA13: ; LOC16 = (Ttype294840*)0; LOC16 = lastson_297377_850551059(t0); t0 = skiptypes_298099_850551059(LOC16, IL64(211106232576256)); } LA2: ; } { NIM_BOOL LOC19; LOC19 = (NIM_BOOL)0; LOC19 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC19) goto LA20; LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA20: ; if (!!(LOC19)) goto LA21; { while (1) { NIM_BOOL LOC25; TY535289 LOC27; Ropeobj180006* LOC28; LOC25 = (NIM_BOOL)0; LOC25 = ((*t0).kind == ((Ttypekind294244) 17)); if (!(LOC25)) goto LA26; LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); LA26: ; if (!LOC25) goto LA24; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Ropeobj180006*)0; LOC28 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0); add_180482_2381377266(&r0, LOC28); t0 = skiptypes_298099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360)); } LA24: ; } } LA21: ; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = isobjlackingtypefield_535513_839829468(t0); if (!LOC31) goto LA32; globalerror_198071_155036129((*x0).info, ((Tmsgkind193002) 4), ((NimStringDesc*) &T839829468_412)); } LA32: ; { TY534811 LOC38; if (!!((nilcheck0 == NIM_NIL))) goto LA36; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = nilcheck0; LOC38[1] = genofhelper_557139_839829468(p0, dest0, r0); r0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2); } goto LA34; LA36: ; { TY180507 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = genofhelper_557139_839829468(p0, dest0, r0); r0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1); } LA34: ; LOC41 = (Ttype294840*)0; LOC41 = getsystype_340150_3937434831(((Ttypekind294244) 1)); putintodest_552468_839829468(p0, d0, LOC41, r0, a0.s); } N_NIMCALL(void, genof_557331_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { genof_557201_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (*(*n0).kindU.S6.sons->data[((NI) 2)]).typ, d0); } 
N_NIMCALL(void, rawgennew_556741_839829468)(Tcproc531021* p0, Tloc294816 a0, Ropeobj180006* sizeexpr_556745_839829468) { Ropeobj180006* sizeexpr0; Ttype294840* reftype0; Tloc294816 b0; TY537238 args0; Ttype294840* bt0; sizeexpr0 = sizeexpr_556745_839829468; reftype0 = skiptypes_298099_850551059(a0.t, IL64(211106242013440)); memset((void*)(&b0), 0, sizeof(b0)); initloc_534273_839829468((&b0), ((Tlockind294808) 6), a0.t, ((Tstorageloc294812) 3)); { TY180507 LOC5; Ttype294840* LOC6; if (!sizeexpr0 == 0) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (Ttype294840*)0; LOC6 = skiptypes_298099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832)); LOC5[0] = gettypedesc_537671_839829468((*p0).module, LOC6); sizeexpr0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1); } LA3: ; memset((void*)args0, 0, sizeof(args0)); args0[0] = gettypedesc_537671_839829468((*p0).module, reftype0); args0[1] = gentypeinfo_537941_839829468((*p0).module, reftype0); args0[2] = sizeexpr0; { NIM_BOOL LOC9; TY534811 LOC21; LOC9 = (NIM_BOOL)0; LOC9 = (a0.s == ((Tstorageloc294812) 3)); if (!(LOC9)) goto LA10; LOC9 = usesnativegc_171177_2607990831(); LA10: ; if (!LOC9) goto LA11; { NIM_BOOL LOC15; TY180507 LOC18; LOC15 = (NIM_BOOL)0; LOC15 = canformacycle_322123_3876443242(a0.t); if (!LOC15) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_540188_839829468(a0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1); } goto LA13; LA16: ; { TY180507 LOC20; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rdloc_540188_839829468(a0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1); } LA13: ; b0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdloc_540188_839829468(a0); LOC21[1] = rdloc_540188_839829468(b0); linefmt_534714_839829468(p0, 
((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2); } goto LA7; LA11: ; { b0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3); genassignment_541264_839829468(p0, a0, b0, 0); } LA7: ; bt0 = skiptypes_298099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832)); genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), bt0, a0, NIM_FALSE); } N_NIMCALL(void, gennew_556782_839829468)(Tcproc531021* p0, Tnode294802* e0) { Tloc294816 a0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); { NI LOC3; Tloc294816 se0; Ropeobj180006* LOC6; LOC3 = (NI)0; LOC3 = len_295081_850551059(e0); if (!(LOC3 == ((NI) 3))) goto LA4; memset((void*)(&se0), 0, sizeof(se0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0)); LOC6 = (Ropeobj180006*)0; LOC6 = rdloc_540188_839829468(se0); rawgennew_556741_839829468(p0, a0, LOC6); } goto LA1; LA4: ; { rawgennew_556741_839829468(p0, a0, NIM_NIL); } LA1: ; gcusage_556439_839829468(e0); } N_NIMCALL(void, gennewfinalize_557110_839829468)(Tcproc531021* p0, Tnode294802* e0) { Tloc294816 a0; Tloc294816 b0; Tloc294816 f0; Ttype294840* reftype0; Ttype294840* bt0; Ropeobj180006* ti0; TY534811 LOC1; TY537238 LOC2; Ttype294840* LOC3; Ttype294840* LOC4; Ttype294840* LOC5; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&f0), 0, sizeof(f0)); reftype0 = (Ttype294840*)0; bt0 = (Ttype294840*)0; ti0 = (Ropeobj180006*)0; reftype0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0)); initloc_534273_839829468((&b0), ((Tlockind294808) 6), a0.t, ((Tstorageloc294812) 3)); ti0 = gentypeinfo_537941_839829468((*p0).module, reftype0); memset((void*)LOC1, 0, sizeof(LOC1)); 
LOC1[0] = ti0; LOC1[1] = rdloc_540188_839829468(f0); addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = gettypedesc_537671_839829468((*p0).module, reftype0); LOC2[1] = ti0; LOC3 = (Ttype294840*)0; LOC3 = lastson_297377_850551059(reftype0); LOC4 = (Ttype294840*)0; LOC4 = skiptypes_298099_850551059(LOC3, IL64(211106233624832)); LOC2[2] = gettypedesc_537671_839829468((*p0).module, LOC4); b0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3); genassignment_541264_839829468(p0, a0, b0, 0); LOC5 = (Ttype294840*)0; LOC5 = lastson_297377_850551059(reftype0); bt0 = skiptypes_298099_850551059(LOC5, IL64(211106233624832)); genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), bt0, a0, NIM_FALSE); gcusage_556439_839829468(e0); } N_NIMCALL(void, gennewseqaux_556795_839829468)(Tcproc531021* p0, Tloc294816 dest0, Ropeobj180006* length0) { Ttype294840* seqtype0; TY537238 args0; Tloc294816 call0; seqtype0 = skiptypes_298099_850551059(dest0.t, IL64(211106242013440)); memset((void*)args0, 0, sizeof(args0)); args0[0] = gettypedesc_537671_839829468((*p0).module, seqtype0); args0[1] = gentypeinfo_537941_839829468((*p0).module, seqtype0); args0[2] = length0; memset((void*)(&call0), 0, sizeof(call0)); initloc_534273_839829468((&call0), ((Tlockind294808) 6), dest0.t, ((Tstorageloc294812) 3)); { NIM_BOOL LOC3; TY534811 LOC15; LOC3 = (NIM_BOOL)0; LOC3 = (dest0.s == ((Tstorageloc294812) 3)); if (!(LOC3)) goto LA4; LOC3 = usesnativegc_171177_2607990831(); LA4: ; if (!LOC3) goto LA5; { NIM_BOOL LOC9; TY180507 LOC12; LOC9 = (NIM_BOOL)0; LOC9 = canformacycle_322123_3876443242(dest0.t); if (!LOC9) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_540188_839829468(dest0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1); } goto LA7; LA10: ; { TY180507 LOC14; 
memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_540188_839829468(dest0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1); } LA7: ; call0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = rdloc_540188_839829468(dest0); LOC15[1] = rdloc_540188_839829468(call0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2); } goto LA1; LA5: ; { call0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3); genassignment_541264_839829468(p0, dest0, call0, 0); } LA1: ; } N_NIMCALL(void, gennewseq_556824_839829468)(Tcproc531021* p0, Tnode294802* e0) { Tloc294816 a0; Tloc294816 b0; Ropeobj180006* LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); LOC1 = (Ropeobj180006*)0; LOC1 = rdloc_540188_839829468(b0); gennewseqaux_556795_839829468(p0, a0, LOC1); gcusage_556439_839829468(e0); } N_NIMCALL(void, gennewseqofcap_556836_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Ttype294840* seqtype0; Tloc294816 a0; TY537238 LOC1; Ropeobj180006* LOC2; seqtype0 = skiptypes_298099_850551059((*e0).typ, IL64(211106242013440)); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = gettypedesc_537671_839829468((*p0).module, seqtype0); LOC1[1] = gentypeinfo_537941_839829468((*p0).module, seqtype0); LOC1[2] = rdloc_540188_839829468(a0); LOC2 = (Ropeobj180006*)0; LOC2 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), LOC1, 3); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc294812) 0)); gcusage_556439_839829468(e0); } 
N_NIMCALL(Ropeobj180006*, getclosuretype_537683_839829468)(Tcgen531027* m0, Ttype294840* t0, Tclosuretypekind537679 kind0) { Ropeobj180006* result0; Intset270030 check0; Ropeobj180006* rettype0; Ropeobj180006* desc0; result0 = (Ropeobj180006*)0; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_270885_2627731572((&check0)); result0 = gettempname_535596_839829468(m0); rettype0 = (Ropeobj180006*)0; desc0 = (Ropeobj180006*)0; genprocparams_536115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind537679) 0))), NIM_FALSE); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isimportedtype_535449_839829468(t0); if (!!(LOC3)) goto LA4; { NIM_BOOL LOC8; TY537235 LOC12; LOC8 = (NIM_BOOL)0; LOC8 = !(((*t0).callconv == ((Tcallingconvention294002) 8))); if (LOC8) goto LA9; LOC8 = !((kind0 == ((Tclosuretypekind537679) 2))); LA9: ; if (!LOC8) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_180277_2381377266(Callingconvtostr_535585_839829468[((*t0).callconv)- 0]); LOC12[1] = rettype0; LOC12[2] = result0; LOC12[3] = desc0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4); } goto LA6; LA10: ; { TY537238 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = result0; LOC14[1] = rettype0; LOC14[2] = desc0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3); } LA6: ; } LA4: ; return result0; } N_NIMCALL(void, gensomecast_558480_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Ttype294840* etyp0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); etyp0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832)); { NIM_BOOL LOC3; TY534811 LOC7; Ropeobj180006* LOC8; LOC3 = (NIM_BOOL)0; LOC3 = ((*etyp0).kind == ((Ttypekind294244) 18) || (*etyp0).kind == 
((Ttypekind294244) 17) || (*etyp0).kind == ((Ttypekind294244) 16) || (*etyp0).kind == ((Ttypekind294244) 27) || (*etyp0).kind == ((Ttypekind294244) 48) || (*etyp0).kind == ((Ttypekind294244) 4)); if (!(LOC3)) goto LA4; LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag294810) 0))&15U)))!=0)); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_537671_839829468((*p0).module, (*e0).typ); LOC7[1] = addrloc_540204_839829468(a0); LOC8 = (Ropeobj180006*)0; LOC8 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC8, a0.s); } goto LA1; LA5: ; { NIM_BOOL LOC10; TY534811 LOC14; Ropeobj180006* LOC15; LOC10 = (NIM_BOOL)0; LOC10 = ((*etyp0).kind == ((Ttypekind294244) 25)); if (!(LOC10)) goto LA11; LOC10 = ((*etyp0).callconv == ((Tcallingconvention294002) 8)); LA11: ; if (!LOC10) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = getclosuretype_537683_839829468((*p0).module, etyp0, ((Tclosuretypekind537679) 1)); LOC14[1] = rdcharloc_540227_839829468(a0); LOC15 = (Ropeobj180006*)0; LOC15 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC15, a0.s); } goto LA1; LA12: ; { TY534811 LOC17; Ropeobj180006* LOC18; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_537671_839829468((*p0).module, (*e0).typ); LOC17[1] = rdcharloc_540227_839829468(a0); LOC18 = (Ropeobj180006*)0; LOC18 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC18, a0.s); } LA1: ; } N_NIMCALL(void, unaryexprchar_553222_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) { Tloc294816 a0; TY180507 LOC1; Ropeobj180006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdcharloc_540227_839829468(a0); LOC2 = 
(Ropeobj180006*)0; LOC2 = ropecg_534407_839829468((*p0).module, frmt0, LOC1, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc294812) 0)); } N_NIMCALL(void, genord_558474_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { unaryexprchar_553222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301)); } N_NIMCALL(void, genarraylen_557415_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) { Tnode294802* a0; Ttype294840* typ0; a0 = (*e0).kindU.S6.sons->data[((NI) 1)]; { if (!((*a0).kind == ((Tnodekind294020) 64))) goto LA3; a0 = (*a0).kindU.S6.sons->data[((NI) 0)]; } LA3: ; typ0 = skiptypes_298099_850551059((*a0).typ, IL64(211106240964864)); switch ((*typ0).kind) { case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): { { if (!(op0 == ((Tmagic294524) 8))) goto LA8; unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431)); } goto LA6; LA8: ; { unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432)); } LA6: ; } break; case ((Ttypekind294244) 29): { usestringh_534345_839829468((*p0).module); { if (!(op0 == ((Tmagic294524) 8))) goto LA14; unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433)); } goto LA12; LA14: ; { unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434)); } LA12: ; } break; case ((Ttypekind294244) 28): case ((Ttypekind294244) 24): { { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC20) goto LA21; LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA21: ; if (!!(LOC20)) goto LA22; { if (!(op0 == ((Tmagic294524) 8))) goto LA26; unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435)); } goto LA24; LA26: ; { unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436)); } LA24: ; } goto LA18; LA22: ; { { if (!(op0 == ((Tmagic294524) 8))) goto LA32; unaryexpr_553209_839829468(p0, e0, d0, 
((NimStringDesc*) &T839829468_437)); } goto LA30; LA32: ; { unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438)); } LA30: ; } LA18: ; } break; case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): { { NI64 LOC40; Ropeobj180006* LOC41; if (!(op0 == ((Tmagic294524) 8))) goto LA38; LOC40 = (NI64)0; LOC40 = lastord_322004_3876443242(typ0); LOC41 = (Ropeobj180006*)0; LOC41 = rope_180401_2381377266(LOC40); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc294812) 0)); } goto LA36; LA38: ; { NI64 LOC43; Ropeobj180006* LOC44; LOC43 = (NI64)0; LOC43 = lengthord_322007_3876443242(typ0); LOC44 = (Ropeobj180006*)0; LOC44 = rope_180401_2381377266(LOC43); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc294812) 0)); } LA36: ; } break; default: { internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439)); } break; } } N_NIMCALL(void, unarystmt_552527_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) { Tloc294816 a0; TY180507 LOC5; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind294808) 0)))) goto LA3; internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442)); } LA3: ; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_540188_839829468(a0); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), frmt0, LOC5, 1); } N_NIMCALL(void, gensetlengthstr_557632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { binarystmt_552501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445)); gcusage_556439_839829468(e0); } N_NIMCALL(void, gensetlengthseq_557500_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; Ttype294840* t0; NimStringDesc* setlenpattern0; TY537235 LOC8; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446)); } goto LA1; LA5: ; { setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447)); } LA1: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_540188_839829468(a0); LOC8[1] = rdloc_540188_839829468(b0); LOC8[2] = gettypedesc_537671_839829468((*p0).module, t0); LOC8[3] = gettypedesc_537671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), setlenpattern0, LOC8, 4); gcusage_556439_839829468(e0); } N_NIMCALL(Ropeobj180006*, rdsetelemloc_557662_839829468)(Tloc294816 a0, Ttype294840* settype0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = rdcharloc_540227_839829468(a0); { NI64 LOC3; TY534811 LOC6; NI64 LOC7; LOC3 = (NI64)0; LOC3 = firstord_322001_3876443242(settype0); if (!!((LOC3 == IL64(0)))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = result0; LOC7 = (NI64)0; LOC7 = firstord_322001_3876443242(settype0); LOC6[1] = rope_180401_2381377266(LOC7); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2); } LA4: ; return result0; } N_NIMCALL(void, binarystmtinexcl_557857_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) { Tloc294816 a0; Tloc294816 b0; TY534811 LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 
0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468(a0); LOC1[1] = rdsetelemloc_557662_839829468(b0, a0.t); linef_534700_839829468(p0, ((Tcprocsection531011) 2), frmt0, LOC1, 2); } N_NIMCALL(void, binaryexprchar_552809_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) { Tloc294816 a0; Tloc294816 b0; TY534811 LOC1; Ropeobj180006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdcharloc_540227_839829468(a0); LOC1[1] = rdcharloc_540227_839829468(b0); LOC2 = (Ropeobj180006*)0; LOC2 = ropecg_534407_839829468((*p0).module, frmt0, LOC1, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc294812) 0)); } N_NIMCALL(NIM_BOOL, fewcmps_557803_839829468)(Tnode294802* s0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { if (!!(((*s0).kind == ((Tnodekind294020) 39)))) goto LA3; internalerror_198100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463)); } LA3: ; { NIM_BOOL LOC7; NI64 LOC8; LOC7 = (NIM_BOOL)0; LOC8 = (NI64)0; LOC8 = getsize_322135_3876443242((*s0).typ); LOC7 = (LOC8 <= ((NI64) (intsize_178641_4151366050))); if (!(LOC7)) goto LA9; LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag294427) 4))&15U)))!=0); LA9: ; if (!LOC7) goto LA10; result0 = NIM_FALSE; } goto LA5; LA10: ; { Ttype294840* LOC13; LOC13 = (Ttype294840*)0; LOC13 = elemtype_322394_3876443242((*s0).typ); if (!((*LOC13).kind == ((Ttypekind294244) 31) || (*LOC13).kind >= ((Ttypekind294244) 33) && (*LOC13).kind <= ((Ttypekind294244) 35))) goto LA14; result0 = NIM_TRUE; } goto LA5; LA14: ; { NI LOC17; LOC17 = (NI)0; LOC17 = sonslen_297351_850551059(s0); result0 = (LOC17 <= ((NI) 8)); } LA5: ; return result0; } N_NIMCALL(void, binaryexprin_557837_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* a0, Tloc294816* b0, Tloc294816* 
d0, NimStringDesc* frmt0) { TY534811 LOC1; Ropeobj180006* LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468((*a0)); LOC1[1] = rdsetelemloc_557662_839829468((*b0), (*a0).t); LOC2 = (Ropeobj180006*)0; LOC2 = HEX25_180905_2381377266(frmt0, LOC1, 2); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc294812) 0)); } N_NIMCALL(void, geninexpraux_555496_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* a0, Tloc294816* b0, Tloc294816* d0) { Ttype294840* LOC1; NI64 LOC2; LOC1 = (Ttype294840*)0; LOC1 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC2 = (NI64)0; LOC2 = getsize_322135_3876443242(LOC1); switch (((NI) (LOC2))) { case ((NI) 1): { binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467)); } break; case ((NI) 2): { binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468)); } break; case ((NI) 4): { binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469)); } break; case ((NI) 8): { binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470)); } break; default: { binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471)); } break; } } N_NIMCALL(void, geninop_558009_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; Tloc294816 x0; Tloc294816 y0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&x0), 0, sizeof(x0)); memset((void*)(&y0), 0, sizeof(y0)); { NIM_BOOL LOC3; Tnode294802* ea0; NI length0; LOC3 = (NIM_BOOL)0; LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind294020) 39)); if (!(LOC3)) goto LA4; LOC3 = fewcmps_557803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]); LA4: ; if (!LOC3) goto LA5; { if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == 
((Tnodekind294020) 69))) goto LA9; ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)]; } goto LA7; LA9: ; { ea0 = (*e0).kindU.S6.sons->data[((NI) 2)]; } LA7: ; initlocexpr_541283_839829468(p0, ea0, (&a0)); initloc_534273_839829468((&b0), ((Tlockind294808) 6), (*e0).typ, ((Tstorageloc294812) 0)); b0.r = rope_180277_2381377266(((NimStringDesc*) &T839829468_118)); length0 = sonslen_297351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]); { NI i_558061_839829468; NI HEX3Atmp_558412_839829468; NI res_558415_839829468; i_558061_839829468 = (NI)0; HEX3Atmp_558412_839829468 = (NI)0; HEX3Atmp_558412_839829468 = (NI)(length0 - ((NI) 1)); res_558415_839829468 = ((NI) 0); { while (1) { if (!(res_558415_839829468 <= HEX3Atmp_558412_839829468)) goto LA14; i_558061_839829468 = res_558415_839829468; { TY537238 LOC19; if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_558061_839829468]).kind == ((Tnodekind294020) 44))) goto LA17; initlocexpr_541283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_558061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0)); initlocexpr_541283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_558061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0)); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdcharloc_540227_839829468(a0); LOC19[1] = rdcharloc_540227_839829468(x0); LOC19[2] = rdcharloc_540227_839829468(y0); addf_181205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3); } goto LA15; LA17: ; { TY534811 LOC21; initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_558061_839829468], (&x0)); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdcharloc_540227_839829468(a0); LOC21[1] = rdcharloc_540227_839829468(x0); addf_181205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2); } LA15: ; { if (!(i_558061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24; add_180487_2381377266(&b0.r, 
((NimStringDesc*) &T839829468_466)); } LA24: ; res_558415_839829468 += ((NI) 1); } LA14: ; } } add_180487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117)); putintodest_552468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc294812) 0)); } goto LA1; LA5: ; { initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); geninexpraux_555496_839829468(p0, e0, (&a0), (&b0), d0); } LA1: ; } N_NIMCALL(void, gensetop_558419_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) { Tloc294816 a0; Tloc294816 b0; Tloc294816 i0; Ttype294840* settype0; NI size0; NI64 LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&i0), 0, sizeof(i0)); settype0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC1 = (NI64)0; LOC1 = getsize_322135_3876443242(settype0); size0 = ((NI) (LOC1)); switch (size0) { case ((NI) 1): case ((NI) 2): case ((NI) 4): case ((NI) 8): { switch (op0) { case ((Tmagic294524) 39): { NimStringDesc* ts0; NimStringDesc* LOC4; NimStringDesc* LOC5; NimStringDesc* LOC6; LOC4 = (NimStringDesc*)0; LOC5 = (NimStringDesc*)0; LOC5 = nimIntToStr((NI)(size0 * ((NI) 8))); LOC4 = rawNewString(LOC5->Sup.len + 2); appendString(LOC4, ((NimStringDesc*) &T839829468_45)); appendString(LOC4, LOC5); ts0 = LOC4; LOC6 = (NimStringDesc*)0; LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35); appendString(LOC6, ((NimStringDesc*) &T839829468_449)); appendString(LOC6, ts0); appendString(LOC6, ((NimStringDesc*) &T839829468_450)); appendString(LOC6, ts0); appendString(LOC6, ((NimStringDesc*) &T839829468_451)); binarystmtinexcl_557857_839829468(p0, e0, d0, LOC6); } break; case ((Tmagic294524) 40): { NimStringDesc* ts0; NimStringDesc* LOC8; NimStringDesc* LOC9; NimStringDesc* LOC10; LOC8 = (NimStringDesc*)0; LOC9 = (NimStringDesc*)0; LOC9 = nimIntToStr((NI)(size0 * ((NI) 8))); LOC8 = 
rawNewString(LOC9->Sup.len + 2); appendString(LOC8, ((NimStringDesc*) &T839829468_45)); appendString(LOC8, LOC9); ts0 = LOC8; LOC10 = (NimStringDesc*)0; LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42); appendString(LOC10, ((NimStringDesc*) &T839829468_452)); appendString(LOC10, ts0); appendString(LOC10, ((NimStringDesc*) &T839829468_453)); appendString(LOC10, ts0); appendString(LOC10, ((NimStringDesc*) &T839829468_454)); binarystmtinexcl_557857_839829468(p0, e0, d0, LOC10); } break; case ((Tmagic294524) 41): { { if (!(size0 <= ((NI) 4))) goto LA14; unaryexprchar_553222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455)); } goto LA12; LA14: ; { unaryexprchar_553222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456)); } LA12: ; } break; case ((Tmagic294524) 133): { binaryexprchar_552809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457)); } break; case ((Tmagic294524) 132): { binaryexprchar_552809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458)); } break; case ((Tmagic294524) 131): { binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341)); } break; case ((Tmagic294524) 134): { binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459)); } break; case ((Tmagic294524) 135): { binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460)); } break; case ((Tmagic294524) 136): { binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461)); } break; case ((Tmagic294524) 137): { binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462)); } break; case ((Tmagic294524) 148): { geninop_558009_839829468(p0, e0, d0); } break; default: { internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472)); } break; } } break; default: { switch (op0) { case ((Tmagic294524) 39): { binarystmtinexcl_557857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473)); } break; case ((Tmagic294524) 40): { binarystmtinexcl_557857_839829468(p0, e0, d0, 
((NimStringDesc*) &T839829468_474)); } break; case ((Tmagic294524) 41): { NimStringDesc* LOC30; NimStringDesc* LOC31; LOC30 = (NimStringDesc*)0; LOC31 = (NimStringDesc*)0; LOC31 = nimIntToStr(size0); LOC30 = rawNewString(LOC31->Sup.len + 14); appendString(LOC30, ((NimStringDesc*) &T839829468_475)); appendString(LOC30, LOC31); appendChar(LOC30, 41); unaryexprchar_553222_839829468(p0, e0, d0, LOC30); } break; case ((Tmagic294524) 133): case ((Tmagic294524) 132): { Ttype294840* LOC33; TY538475 LOC39; LOC33 = (Ttype294840*)0; LOC33 = getsystype_340150_3937434831(((Ttypekind294244) 31)); gettemp_539032_839829468(p0, LOC33, (&i0), NIM_FALSE); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { Ttype294840* LOC38; if (!((*d0).k == ((Tlockind294808) 0))) goto LA36; LOC38 = (Ttype294840*)0; LOC38 = getsystype_340150_3937434831(((Ttypekind294244) 1)); gettemp_539032_839829468(p0, LOC38, d0, NIM_FALSE); } LA36: ; memset((void*)LOC39, 0, sizeof(LOC39)); LOC39[0] = rdloc_540188_839829468(i0); LOC39[1] = rope_180401_2381377266(((NI64) (size0))); LOC39[2] = rdloc_540188_839829468((*d0)); LOC39[3] = rdloc_540188_839829468(a0); LOC39[4] = rdloc_540188_839829468(b0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), lookupopr_558426_839829468[(op0)- 132], LOC39, 5); } break; case ((Tmagic294524) 131): { NimStringDesc* LOC41; NimStringDesc* LOC42; usestringh_534345_839829468((*p0).module); LOC41 = (NimStringDesc*)0; LOC42 = (NimStringDesc*)0; LOC42 = nimIntToStr(size0); LOC41 = rawNewString(LOC42->Sup.len + 21); appendString(LOC41, ((NimStringDesc*) &T839829468_481)); appendString(LOC41, LOC42); appendString(LOC41, ((NimStringDesc*) &T839829468_482)); binaryexprchar_552809_839829468(p0, e0, d0, LOC41); } break; case ((Tmagic294524) 134): case ((Tmagic294524) 135): case ((Tmagic294524) 136): case ((Tmagic294524) 137): { Ttype294840* LOC44; TY538847 LOC49; LOC44 = 
(Ttype294840*)0; LOC44 = getsystype_340150_3937434831(((Ttypekind294244) 31)); gettemp_539032_839829468(p0, LOC44, (&i0), NIM_FALSE); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA47; gettemp_539032_839829468(p0, a0.t, d0, NIM_FALSE); } LA47: ; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_540188_839829468(i0); LOC49[1] = rope_180401_2381377266(((NI64) (size0))); LOC49[2] = rdloc_540188_839829468((*d0)); LOC49[3] = rdloc_540188_839829468(a0); LOC49[4] = rdloc_540188_839829468(b0); LOC49[5] = rope_180277_2381377266(lookupopr_558426_839829468[(op0)- 132]); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6); } break; case ((Tmagic294524) 148): { geninop_558009_839829468(p0, e0, d0); } break; default: { internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484)); } break; } } break; } } static N_INLINE(Ropeobj180006*, genargstringtocstring_541776_839829468)(Tcproc531021* p0, Tnode294802* n0) { Ropeobj180006* result0; Tloc294816 a0; TY180507 LOC1; result0 = (Ropeobj180006*)0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468(a0); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_485), LOC1, 1); return result0; } N_NIMCALL(Ropeobj180006*, openarrayloc_541665_839829468)(Tcproc531021* p0, Tnode294802* n0) { Ropeobj180006* result0; Tloc294816 a0; Tnode294802* q0; result0 = (Ropeobj180006*)0; memset((void*)(&a0), 0, sizeof(a0)); q0 = skipconv_330882_3876443242(n0); { Tmagic294524 LOC3; Tloc294816 b0; Tloc294816 c0; Tnode294802* LOC6; Tnode294802* LOC7; Tnode294802* LOC8; NimStringDesc* fmt0; Ttype294840* LOC9; TY537238 LOC25; LOC3 = (Tmagic294524)0; LOC3 = getmagic_320502_2616423590(q0); if 
(!(LOC3 == ((Tmagic294524) 139))) goto LA4; memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&c0), 0, sizeof(c0)); LOC6 = (Tnode294802*)0; LOC6 = HEX5BHEX5D_295238_850551059(q0, ((NI) 1)); initlocexpr_541283_839829468(p0, LOC6, (&a0)); LOC7 = (Tnode294802*)0; LOC7 = HEX5BHEX5D_295238_850551059(q0, ((NI) 2)); initlocexpr_541283_839829468(p0, LOC7, (&b0)); LOC8 = (Tnode294802*)0; LOC8 = HEX5BHEX5D_295238_850551059(q0, ((NI) 3)); initlocexpr_541283_839829468(p0, LOC8, (&c0)); LOC9 = (Ttype294840*)0; LOC9 = skiptypes_298099_850551059(a0.t, IL64(211106243062016)); switch ((*LOC9).kind) { case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): { fmt0 = copyString(((NimStringDesc*) &T839829468_486)); } break; case ((Ttypekind294244) 28): case ((Ttypekind294244) 24): { { NIM_BOOL LOC14; Ttype294840* LOC15; NIM_BOOL LOC17; LOC14 = (NIM_BOOL)0; LOC15 = (Ttype294840*)0; LOC15 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256)); LOC14 = ((*LOC15).kind == ((Ttypekind294244) 23)); if (!(LOC14)) goto LA16; LOC17 = (NIM_BOOL)0; LOC17 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC17) goto LA18; LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA18: ; LOC14 = !(LOC17); LA16: ; if (!LOC14) goto LA19; fmt0 = copyString(((NimStringDesc*) &T839829468_487)); } goto LA12; LA19: ; { fmt0 = copyString(((NimStringDesc*) &T839829468_488)); } LA12: ; } break; default: { NimStringDesc* LOC23; NimStringDesc* LOC24; LOC23 = (NimStringDesc*)0; LOC24 = (NimStringDesc*)0; LOC24 = typetostring_322017_3876443242(a0.t, ((Tprefereddesc322011) 0)); LOC23 = rawNewString(LOC24->Sup.len + 14); appendString(LOC23, ((NimStringDesc*) &T839829468_489)); appendString(LOC23, LOC24); internalerror_198113_155036129(LOC23); fmt0 = copyString(((NimStringDesc*) &T839829468_490)); } break; } memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = rdloc_540188_839829468(a0); LOC25[1] = 
rdloc_540188_839829468(b0); LOC25[2] = rdloc_540188_839829468(c0); result0 = HEX25_180905_2381377266(fmt0, LOC25, 3); } goto LA1; LA4: ; { Ttype294840* LOC27; initlocexpr_541283_839829468(p0, n0, (&a0)); LOC27 = (Ttype294840*)0; LOC27 = skiptypes_298099_850551059(a0.t, IL64(211106240964864)); switch ((*LOC27).kind) { case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): { TY180507 LOC29; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdloc_540188_839829468(a0); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1); } break; case ((Ttypekind294244) 28): case ((Ttypekind294244) 24): { { NIM_BOOL LOC33; Ttype294840* LOC34; NIM_BOOL LOC36; TY534811 LOC40; LOC33 = (NIM_BOOL)0; LOC34 = (Ttype294840*)0; LOC34 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256)); LOC33 = ((*LOC34).kind == ((Ttypekind294244) 23)); if (!(LOC33)) goto LA35; LOC36 = (NIM_BOOL)0; LOC36 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC36) goto LA37; LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA37: ; LOC33 = !(LOC36); LA35: ; if (!LOC33) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = rdloc_540188_839829468(a0); LOC40[1] = lenfield_541305_839829468(p0); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2); } goto LA31; LA38: ; { TY534811 LOC42; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = rdloc_540188_839829468(a0); LOC42[1] = lenfield_541305_839829468(p0); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2); } LA31: ; } break; case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): { TY534811 LOC44; NI64 LOC45; memset((void*)LOC44, 0, sizeof(LOC44)); LOC44[0] = rdloc_540188_839829468(a0); LOC45 = (NI64)0; LOC45 = lengthord_322007_3876443242(a0.t); LOC44[1] = rope_180401_2381377266(LOC45); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2); } break; case ((Ttypekind294244) 21): case 
((Ttypekind294244) 22): { Ttype294840* LOC47; LOC47 = (Ttype294840*)0; LOC47 = lastson_297377_850551059(a0.t); switch ((*LOC47).kind) { case ((Ttypekind294244) 28): case ((Ttypekind294244) 24): { TY534811 LOC49; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_540188_839829468(a0); LOC49[1] = lenfield_541305_839829468(p0); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2); } break; case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): { TY534811 LOC51; Ttype294840* LOC52; NI64 LOC53; memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = rdloc_540188_839829468(a0); LOC52 = (Ttype294840*)0; LOC52 = lastson_297377_850551059(a0.t); LOC53 = (NI64)0; LOC53 = lengthord_322007_3876443242(LOC52); LOC51[1] = rope_180401_2381377266(LOC53); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2); } break; default: { NimStringDesc* LOC55; NimStringDesc* LOC56; LOC55 = (NimStringDesc*)0; LOC56 = (NimStringDesc*)0; LOC56 = typetostring_322017_3876443242(a0.t, ((Tprefereddesc322011) 0)); LOC55 = rawNewString(LOC56->Sup.len + 14); appendString(LOC55, ((NimStringDesc*) &T839829468_489)); appendString(LOC55, LOC56); internalerror_198113_155036129(LOC55); } break; } } break; default: { NimStringDesc* LOC58; NimStringDesc* LOC59; LOC58 = (NimStringDesc*)0; LOC59 = (NimStringDesc*)0; LOC59 = typetostring_322017_3876443242(a0.t, ((Tprefereddesc322011) 0)); LOC58 = rawNewString(LOC59->Sup.len + 14); appendString(LOC58, ((NimStringDesc*) &T839829468_489)); appendString(LOC58, LOC59); internalerror_198113_155036129(LOC58); } break; } } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, genarg_541787_839829468)(Tcproc531021* p0, Tnode294802* n_541790_839829468, Tsym294834* param0, Tnode294802* call0) { Ropeobj180006* result0; Tloc294816 a0; result0 = (Ropeobj180006*)0; memset((void*)(&a0), 0, sizeof(a0)); { if (!((*n_541790_839829468).kind == ((Tnodekind294020) 71))) goto LA3; result0 = genargstringtocstring_541776_839829468(p0, 
n_541790_839829468); } goto LA1; LA3: ; { Ttype294840* LOC6; Tnode294802* n0; LOC6 = (Ttype294840*)0; LOC6 = skiptypes_298099_850551059((*param0).typ, IL64(211106240964864)); if (!((*LOC6).kind == ((Ttypekind294244) 27) || (*LOC6).kind == ((Ttypekind294244) 48))) goto LA7; { if (!!(((*n_541790_839829468).kind == ((Tnodekind294020) 64)))) goto LA11; n0 = n_541790_839829468; } goto LA9; LA11: ; { n0 = (*n_541790_839829468).kindU.S6.sons->data[((NI) 0)]; } LA9: ; result0 = openarrayloc_541665_839829468(p0, n0); } goto LA1; LA7: ; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = ccgintroducedptr_535609_839829468(param0); if (!LOC15) goto LA16; initlocexpr_541283_839829468(p0, n_541790_839829468, (&a0)); result0 = addrloc_540204_839829468(a0); } goto LA1; LA16: ; { NIM_BOOL LOC19; NIM_BOOL LOC20; NIM_BOOL LOC21; Tnode294802* callee0; LOC19 = (NIM_BOOL)0; LOC20 = (NIM_BOOL)0; LOC21 = (NIM_BOOL)0; LOC21 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC21) goto LA22; LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA22: ; LOC20 = LOC21; if (!(LOC20)) goto LA23; LOC20 = ((*(*param0).typ).kind == ((Ttypekind294244) 23)); LA23: ; LOC19 = LOC20; if (!(LOC19)) goto LA24; LOC19 = ((*n_541790_839829468).kind == ((Tnodekind294020) 64)); LA24: ; if (!LOC19) goto LA25; initlocexprsingleuse_541289_839829468(p0, (*n_541790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0)); callee0 = (*call0).kindU.S6.sons->data[((NI) 0)]; { NIM_BOOL LOC29; NIM_BOOL LOC30; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((*callee0).kind == ((Tnodekind294020) 3)); if (!(LOC30)) goto LA31; LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32); LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA32; LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0)); LA32: ; if (!LOC29) goto LA33; result0 = addrloc_540204_839829468(a0); } goto LA27; LA33: ; { result0 = rdloc_540188_839829468(a0); } LA27: ; } goto LA1; LA25: ; { 
initlocexprsingleuse_541289_839829468(p0, n_541790_839829468, (&a0)); result0 = rdloc_540188_839829468(a0); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, genargnoparam_541938_839829468)(Tcproc531021* p0, Tnode294802* n0) { Ropeobj180006* result0; Tloc294816 a0; result0 = (Ropeobj180006*)0; memset((void*)(&a0), 0, sizeof(a0)); { if (!((*n0).kind == ((Tnodekind294020) 71))) goto LA3; result0 = genargstringtocstring_541776_839829468(p0, n0); } goto LA1; LA3: ; { initlocexprsingleuse_541289_839829468(p0, n0, (&a0)); result0 = rdloc_540188_839829468(a0); } LA1: ; return result0; } N_NIMCALL(Ropeobj180006*, getrawproctype_542459_839829468)(Tcproc531021* p0, Ttype294840* t0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = getclosuretype_537683_839829468((*p0).module, t0, ((Tclosuretypekind537679) 0)); return result0; } N_NIMCALL(NIM_BOOL, leftappearsonrightside_541329_839829468)(Tnode294802* le0, Tnode294802* ri0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!!((le0 == NIM_NIL))) goto LA3; { NI i_541364_839829468; NI HEX3Atmp_541376_839829468; NI LOC6; NI res_541379_839829468; i_541364_839829468 = (NI)0; HEX3Atmp_541376_839829468 = (NI)0; LOC6 = (NI)0; LOC6 = len_295081_850551059(ri0); HEX3Atmp_541376_839829468 = (LOC6 - 1); res_541379_839829468 = ((NI) 1); { while (1) { Tnode294802* r0; if (!(res_541379_839829468 <= HEX3Atmp_541376_839829468)) goto LA8; i_541364_839829468 = res_541379_839829468; r0 = HEX5BHEX5D_295238_850551059(ri0, i_541364_839829468); { Tanalysisresult475003 LOC11; LOC11 = (Tanalysisresult475003)0; LOC11 = ispartof_475340_788060399(le0, r0); if (!!((LOC11 == ((Tanalysisresult475003) 0)))) goto LA12; result0 = NIM_TRUE; goto BeforeRet; } LA12: ; res_541379_839829468 += ((NI) 1); } LA8: ; } } } LA3: ; }BeforeRet: ; return result0; } static N_INLINE(NIM_BOOL, hasnoinit_541383_839829468)(Tnode294802* call0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = 
((*(*call0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)); if (!(LOC1)) goto LA2; LOC1 = (((*(*(*call0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, resetloc_540350_839829468)(Tcproc531021* p0, Tloc294816* loc0) { NIM_BOOL containsgcref0; Ttype294840* typ0; { containsgcref0 = containsgarbagecollectedref_322117_3876443242((*loc0).t); typ0 = skiptypes_298099_850551059((*loc0).t, IL64(211106242013440)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isimportedcpptype_535476_839829468(typ0); if (!LOC3) goto LA4; goto BeforeRet; } LA4: ; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = iscomplexvaluetype_540317_839829468(typ0); if (!!(LOC8)) goto LA9; { Tloc294816 nilloc0; if (!containsgcref0) goto LA13; memset((void*)(&nilloc0), 0, sizeof(nilloc0)); initloc_534273_839829468((&nilloc0), ((Tlockind294808) 1), (*loc0).t, ((Tstorageloc294812) 2)); nilloc0.r = rope_180277_2381377266(((NimStringDesc*) &T839829468_174)); genrefassign_540311_839829468(p0, (*loc0), nilloc0, 8); } goto LA11; LA13: ; { TY180507 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_540188_839829468((*loc0)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1); } LA11: ; } goto LA6; LA9: ; { { TY180507 LOC22; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 6))&31U)))!=0)) goto LA20; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = addrloc_540204_839829468((*loc0)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1); } LA20: ; { TY534811 LOC27; if (!!(((*loc0).s == ((Tstorageloc294812) 2)))) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = addrloc_540204_839829468((*loc0)); LOC27[1] = gentypeinfo_537941_839829468((*p0).module, (*loc0).t); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2); 
genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), (*loc0).t, (*loc0), NIM_TRUE); } goto LA23; LA25: ; { TY534811 LOC29; usestringh_534345_839829468((*p0).module); memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = addrloc_540204_839829468((*loc0)); LOC29[1] = rdloc_540188_839829468((*loc0)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2); genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), (*loc0).t, (*loc0), NIM_TRUE); } LA23: ; } LA6: ; }BeforeRet: ; } N_NIMCALL(Ropeobj180006*, addcomma_542464_839829468)(Ropeobj180006* r0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { if (!(r0 == NIM_NIL)) goto LA3; result0 = r0; } goto LA1; LA3: ; { TY535289 LOC6; Ropeobj180006* LOC7; memset((void*)LOC6, 0, sizeof(LOC6)); LOC7 = (Ropeobj180006*)0; LOC7 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC6, 0); result0 = HEX26_180418_2381377266(r0, LOC7); } LA1: ; return result0; } N_NIMCALL(void, genclosurecall_542452_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0) { Tloc294816 op0; Ropeobj180006* pl0; Ttype294840* typ0; NI length0; Ropeobj180006* rawproc0; NimStringDesc* callpattern0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_541283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); pl0 = (Ropeobj180006*)0; typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_297351_850551059(ri0); { NI i_542613_839829468; NI HEX3Atmp_543214_839829468; NI res_543217_839829468; i_542613_839829468 = (NI)0; HEX3Atmp_543214_839829468 = (NI)0; HEX3Atmp_543214_839829468 = (NI)(length0 - ((NI) 1)); res_543217_839829468 = ((NI) 1); { while (1) { if (!(res_543217_839829468 <= HEX3Atmp_543214_839829468)) goto LA3; i_542613_839829468 = res_543217_839829468; { NI LOC6; Tnode294802* paramtype0; LOC6 = (NI)0; LOC6 = sonslen_297327_850551059(typ0); if (!(i_542613_839829468 < LOC6)) goto LA7; 
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_542613_839829468]; { NIM_BOOL LOC11; Ropeobj180006* LOC20; LOC11 = (NIM_BOOL)0; LOC11 = iscompiletimeonly_330706_3876443242((*paramtype0).typ); if (!!(LOC11)) goto LA12; { TY535289 LOC18; Ropeobj180006* LOC19; if (!!((pl0 == NIM_NIL))) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (Ropeobj180006*)0; LOC19 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0); add_180482_2381377266(&pl0, LOC19); } LA16: ; LOC20 = (Ropeobj180006*)0; LOC20 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[i_542613_839829468], (*paramtype0).kindU.S4.sym, ri0); add_180482_2381377266(&pl0, LOC20); } LA12: ; } goto LA4; LA7: ; { Ropeobj180006* LOC28; { TY535289 LOC26; Ropeobj180006* LOC27; if (!!((pl0 == NIM_NIL))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC27 = (Ropeobj180006*)0; LOC27 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0); add_180482_2381377266(&pl0, LOC27); } LA24: ; LOC28 = (Ropeobj180006*)0; LOC28 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[i_542613_839829468]); add_180482_2381377266(&pl0, LOC28); } LA4: ; res_543217_839829468 += ((NI) 1); } LA3: ; } } rawproc0 = getrawproctype_542459_839829468(p0, typ0); { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 14))&31U)))!=0)) goto LA31; callpattern0 = copyString(((NimStringDesc*) &T839829468_492)); } goto LA29; LA31: ; { callpattern0 = copyString(((NimStringDesc*) &T839829468_493)); } LA29: ; { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36; { NIM_BOOL LOC40; LOC40 = (NIM_BOOL)0; LOC40 = isinvalidreturntype_535548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC40) goto LA41; { NI LOC45; TY535289 LOC48; Ropeobj180006* LOC49; LOC45 = (NI)0; LOC45 = sonslen_297351_850551059(ri0); if (!(((NI) 1) < LOC45)) goto LA46; memset((void*)LOC48, 0, sizeof(LOC48)); LOC49 = (Ropeobj180006*)0; LOC49 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0); 
add_180482_2381377266(&pl0, LOC49); } LA46: ; { NIM_BOOL LOC52; NIM_BOOL LOC54; Ropeobj180006* LOC67; NimStringDesc* LOC68; TY537235 LOC69; LOC52 = (NIM_BOOL)0; LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0); if (LOC52) goto LA53; LOC54 = (NIM_BOOL)0; LOC54 = leftappearsonrightside_541329_839829468(le0, ri0); LOC52 = !(LOC54); LA53: ; if (!LOC52) goto LA55; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA59; gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } goto LA57; LA59: ; { NIM_BOOL LOC62; NIM_BOOL LOC64; LOC62 = (NIM_BOOL)0; LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0)); if (!(LOC62)) goto LA63; LOC64 = (NIM_BOOL)0; LOC64 = hasnoinit_541383_839829468(ri0); LOC62 = !(LOC64); LA63: ; if (!LOC62) goto LA65; resetloc_540350_839829468(p0, d0); } goto LA57; LA65: ; LA57: ; LOC67 = (Ropeobj180006*)0; LOC67 = addrloc_540204_839829468((*d0)); add_180482_2381377266(&pl0, LOC67); LOC68 = (NimStringDesc*)0; LOC68 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC68, callpattern0); appendString(LOC68, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC69, 0, sizeof(LOC69)); LOC69[0] = op0.r; LOC69[1] = pl0; LOC69[2] = addcomma_542464_839829468(pl0); LOC69[3] = rawproc0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC68, LOC69, 4); } goto LA50; LA55: ; { Tloc294816 tmp0; Ropeobj180006* LOC71; NimStringDesc* LOC72; TY537235 LOC73; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC71 = (Ropeobj180006*)0; LOC71 = addrloc_540204_839829468(tmp0); add_180482_2381377266(&pl0, LOC71); LOC72 = (NimStringDesc*)0; LOC72 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC72, callpattern0); appendString(LOC72, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = op0.r; LOC73[1] = pl0; LOC73[2] = addcomma_542464_839829468(pl0); LOC73[3] = rawproc0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC72, LOC73, 4); 
genassignment_541264_839829468(p0, (*d0), tmp0, 0); } LA50: ; } goto LA38; LA41: ; { Tloc294816 list0; TY537235 LOC79; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA77; gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA77: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_534273_839829468((&list0), ((Tlockind294808) 9), (*d0).t, ((Tstorageloc294812) 0)); memset((void*)LOC79, 0, sizeof(LOC79)); LOC79[0] = op0.r; LOC79[1] = pl0; LOC79[2] = addcomma_542464_839829468(pl0); LOC79[3] = rawproc0; list0.r = HEX25_180905_2381377266(callpattern0, LOC79, 4); genassignment_541264_839829468(p0, (*d0), list0, 0); } LA38: ; } goto LA34; LA36: ; { NimStringDesc* LOC81; TY537235 LOC82; LOC81 = (NimStringDesc*)0; LOC81 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC81, callpattern0); appendString(LOC81, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC82, 0, sizeof(LOC82)); LOC82[0] = op0.r; LOC82[1] = pl0; LOC82[2] = addcomma_542464_839829468(pl0); LOC82[3] = rawproc0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC81, LOC82, 4); } LA34: ; } N_NIMCALL(Ropeobj180006*, genotherarg_541277_839829468)(Tcproc531021* p0, Tnode294802* ri0, NI i0, Ttype294840* typ0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { NI LOC3; Tnode294802* paramtype0; LOC3 = (NI)0; LOC3 = sonslen_297327_850551059(typ0); if (!(i0 < LOC3)) goto LA4; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0]; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = iscompiletimeonly_330706_3876443242((*paramtype0).typ); if (!LOC8) goto LA9; result0 = NIM_NIL; } goto LA6; LA9: ; { NIM_BOOL LOC12; Tnode294802* LOC16; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind294244) 23)); if (!(LOC12)) goto LA13; LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 64)); LA13: ; if (!LOC12) goto LA14; LOC16 = (Tnode294802*)0; LOC16 = HEX5BHEX5D_295238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0)); result0 = 
genargnoparam_541938_839829468(p0, LOC16); } goto LA6; LA14: ; { result0 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]); } LA6: ; } goto LA1; LA4: ; { { if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0))) goto LA21; localerror_198085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501)); result0 = NIM_NIL; } goto LA19; LA21: ; { result0 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]); } LA19: ; } LA1: ; return result0; } N_NIMCALL(Tnode294802*, skipaddrderef_543433_839829468)(Tnode294802* node0) { Tnode294802* result0; Tnode294802* n0; NIM_BOOL isaddr0; { result0 = (Tnode294802*)0; n0 = node0; isaddr0 = NIM_FALSE; switch ((*n0).kind) { case ((Tnodekind294020) 63): case ((Tnodekind294020) 64): { n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; isaddr0 = NIM_TRUE; } break; case ((Tnodekind294020) 47): case ((Tnodekind294020) 65): { n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } break; default: { result0 = n0; goto BeforeRet; } break; } { if (!((*n0).kind == ((Tnodekind294020) 66))) goto LA6; n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } LA6: ; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = isaddr0; if (!(LOC10)) goto LA11; LOC10 = ((*n0).kind == ((Tnodekind294020) 47) || (*n0).kind == ((Tnodekind294020) 65)); LA11: ; if (!LOC10) goto LA12; result0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } goto LA8; LA12: ; { if (!((*n0).kind == ((Tnodekind294020) 63) || (*n0).kind == ((Tnodekind294020) 64))) goto LA15; result0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } goto LA8; LA15: ; { result0 = node0; } LA8: ; }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj180006*, genthisarg_543475_839829468)(Tcproc531021* p0, Tnode294802* ri_543478_839829468, NI i0, Ttype294840* typ0) { Ropeobj180006* result0; Tnode294802* ri0; Ttype294840* t0; result0 = (Ropeobj180006*)0; { NI LOC3; NimStringDesc* LOC6; LOC3 = (NI)0; LOC3 = sonslen_297327_850551059(typ0); if (!!((i0 < LOC3))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = 
HEX24_198185_1689653243(T839829468_503); internalerror_198113_155036129(LOC6); } LA4: ; ri0 = HEX5BHEX5D_295238_850551059(ri_543478_839829468, i0); { while (1) { if (!((*ri0).kind == ((Tnodekind294020) 66))) goto LA8; ri0 = HEX5BHEX5D_295238_850551059(ri0, ((NI) 0)); } LA8: ; } t0 = skiptypes_298099_850551059((*typ0).sons->data[i0], 2048); { Tnode294802* x0; if (!((*t0).kind == ((Ttypekind294244) 23))) goto LA11; { if (!((*ri0).kind == ((Tnodekind294020) 64))) goto LA15; x0 = HEX5BHEX5D_295238_850551059(ri0, ((NI) 0)); } goto LA13; LA15: ; { x0 = ri0; } LA13: ; { if (!((*(*x0).typ).kind == ((Ttypekind294244) 21))) goto LA20; result0 = genargnoparam_541938_839829468(p0, x0); add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_504)); } goto LA18; LA20: ; { NIM_BOOL LOC23; Tnode294802* LOC25; Tnode294802* LOC28; LOC23 = (NIM_BOOL)0; LOC23 = ((*x0).kind == ((Tnodekind294020) 65) || (*x0).kind == ((Tnodekind294020) 47)); if (!(LOC23)) goto LA24; LOC25 = (Tnode294802*)0; LOC25 = HEX5BHEX5D_295238_850551059(x0, ((NI) 0)); LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind294244) 21)); LA24: ; if (!LOC23) goto LA26; LOC28 = (Tnode294802*)0; LOC28 = HEX5BHEX5D_295238_850551059(x0, ((NI) 0)); result0 = genargnoparam_541938_839829468(p0, LOC28); add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_504)); } goto LA18; LA26: ; { result0 = genargnoparam_541938_839829468(p0, x0); add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } LA18: ; } goto LA9; LA11: ; { if (!((*t0).kind == ((Ttypekind294244) 21))) goto LA31; { Tnode294802* LOC37; if (!((*ri0).kind == ((Tnodekind294020) 63) || (*ri0).kind == ((Tnodekind294020) 64))) goto LA35; LOC37 = (Tnode294802*)0; LOC37 = HEX5BHEX5D_295238_850551059(ri0, ((NI) 0)); result0 = genargnoparam_541938_839829468(p0, LOC37); add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } goto LA33; LA35: ; { result0 = genargnoparam_541938_839829468(p0, ri0); add_180487_2381377266(&result0, 
((NimStringDesc*) &T839829468_504)); } LA33: ; } goto LA9; LA31: ; { ri0 = skipaddrderef_543433_839829468(ri0); { if (!((*ri0).kind == ((Tnodekind294020) 63) || (*ri0).kind == ((Tnodekind294020) 64))) goto LA42; ri0 = HEX5BHEX5D_295238_850551059(ri0, ((NI) 0)); } LA42: ; result0 = genargnoparam_541938_839829468(p0, ri0); add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } LA9: ; return result0; } N_NIMCALL(Ropeobj180006*, genpatterncall_543699_839829468)(Tcproc531021* p0, Tnode294802* ri_543702_839829468, NimStringDesc* pat0, Ttype294840* typ_543704_839829468) { Ropeobj180006* result0; NI i0; NI j0; result0 = (Ropeobj180006*)0; i0 = ((NI) 0); j0 = ((NI) 1); { while (1) { if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2; switch (((NU8)(pat0->data[i0]))) { case 64: { { NI LOC6; Ropeobj180006* LOC9; LOC6 = (NI)0; LOC6 = len_295081_850551059(ri_543702_839829468); if (!(j0 < LOC6)) goto LA7; LOC9 = (Ropeobj180006*)0; LOC9 = genotherarg_541277_839829468(p0, ri_543702_839829468, j0, typ_543704_839829468); add_180482_2381377266(&result0, LOC9); { NI k_543728_839829468; NI HEX3Atmp_543904_839829468; NI HEX3Atmp_543905_839829468; NI LOC11; NI res_543908_839829468; k_543728_839829468 = (NI)0; HEX3Atmp_543904_839829468 = (NI)0; HEX3Atmp_543905_839829468 = (NI)0; HEX3Atmp_543904_839829468 = (NI)(j0 + ((NI) 1)); LOC11 = (NI)0; LOC11 = len_295081_850551059(ri_543702_839829468); HEX3Atmp_543905_839829468 = (LOC11 - 1); res_543908_839829468 = HEX3Atmp_543904_839829468; { while (1) { TY535289 LOC14; Ropeobj180006* LOC15; Ropeobj180006* LOC16; if (!(res_543908_839829468 <= HEX3Atmp_543905_839829468)) goto LA13; k_543728_839829468 = res_543908_839829468; memset((void*)LOC14, 0, sizeof(LOC14)); LOC15 = (Ropeobj180006*)0; LOC15 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0); add_180482_2381377266(&result0, LOC15); LOC16 = (Ropeobj180006*)0; LOC16 = genotherarg_541277_839829468(p0, ri_543702_839829468, k_543728_839829468, typ_543704_839829468); 
add_180482_2381377266(&result0, LOC16); res_543908_839829468 += ((NI) 1); } LA13: ; } } } LA7: ; i0 += ((NI) 1); } break; case 35: { { Tnode294802* ri0; if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20; ri0 = HEX5BHEX5D_295238_850551059(ri_543702_839829468, j0); { Ttype294840* typ0; TY535289 LOC31; Ropeobj180006* LOC32; TY535289 LOC46; Ropeobj180006* LOC47; if (!((*ri0).kind == ((Tnodekind294020) 27) || (*ri0).kind == ((Tnodekind294020) 29) || (*ri0).kind == ((Tnodekind294020) 30) || (*ri0).kind == ((Tnodekind294020) 31) || (*ri0).kind == ((Tnodekind294020) 26) || (*ri0).kind == ((Tnodekind294020) 28) || (*ri0).kind == ((Tnodekind294020) 32))) goto LA24; typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { Ropeobj180006* LOC30; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28; LOC30 = (Ropeobj180006*)0; LOC30 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]); add_180482_2381377266(&result0, LOC30); } LA28: ; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj180006*)0; LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0); add_180482_2381377266(&result0, LOC32); { NI LOC35; Ropeobj180006* LOC38; LOC35 = (NI)0; LOC35 = len_295081_850551059(ri0); if (!(((NI) 1) < LOC35)) goto LA36; LOC38 = (Ropeobj180006*)0; LOC38 = genotherarg_541277_839829468(p0, ri0, ((NI) 1), typ0); add_180482_2381377266(&result0, LOC38); } LA36: ; { NI k_543793_839829468; NI HEX3Atmp_543915_839829468; NI HEX3Atmp_543916_839829468; NI LOC40; NI res_543919_839829468; k_543793_839829468 = (NI)0; HEX3Atmp_543915_839829468 = (NI)0; HEX3Atmp_543916_839829468 = (NI)0; HEX3Atmp_543915_839829468 = (NI)(j0 + ((NI) 1)); LOC40 = (NI)0; LOC40 = len_295081_850551059(ri0); HEX3Atmp_543916_839829468 = (LOC40 - 1); res_543919_839829468 = HEX3Atmp_543915_839829468; { while (1) { TY535289 LOC43; 
Ropeobj180006* LOC44; Ropeobj180006* LOC45; if (!(res_543919_839829468 <= HEX3Atmp_543916_839829468)) goto LA42; k_543793_839829468 = res_543919_839829468; memset((void*)LOC43, 0, sizeof(LOC43)); LOC44 = (Ropeobj180006*)0; LOC44 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0); add_180482_2381377266(&result0, LOC44); LOC45 = (Ropeobj180006*)0; LOC45 = genotherarg_541277_839829468(p0, ri0, k_543793_839829468, typ0); add_180482_2381377266(&result0, LOC45); res_543919_839829468 += ((NI) 1); } LA42: ; } } memset((void*)LOC46, 0, sizeof(LOC46)); LOC47 = (Ropeobj180006*)0; LOC47 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0); add_180482_2381377266(&result0, LOC47); } goto LA22; LA24: ; { localerror_198085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502)); } LA22: ; i0 += ((NI) 1); } goto LA18; LA20: ; { Ropeobj180006* LOC52; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50; LOC52 = (Ropeobj180006*)0; LOC52 = genthisarg_543475_839829468(p0, ri_543702_839829468, j0, typ_543704_839829468); add_180482_2381377266(&result0, LOC52); i0 += ((NI) 1); } goto LA18; LA50: ; { Tnode294802* arg0; Ropeobj180006* LOC58; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54; arg0 = skipaddrderef_543433_839829468((*ri_543702_839829468).kindU.S6.sons->data[j0]); { while (1) { if (!((*arg0).kind == ((Tnodekind294020) 63) || (*arg0).kind == ((Tnodekind294020) 64) || (*arg0).kind == ((Tnodekind294020) 66))) goto LA57; arg0 = HEX5BHEX5D_295238_850551059(arg0, ((NI) 0)); } LA57: ; } LOC58 = (Ropeobj180006*)0; LOC58 = genargnoparam_541938_839829468(p0, arg0); add_180482_2381377266(&result0, LOC58); } goto LA18; LA54: ; { Ropeobj180006* LOC60; LOC60 = (Ropeobj180006*)0; LOC60 = genotherarg_541277_839829468(p0, ri_543702_839829468, j0, typ_543704_839829468); add_180482_2381377266(&result0, LOC60); } LA18: ; j0 += ((NI) 1); i0 += ((NI) 1); } break; case 39: { NI idx0; NI stars0; idx0 = (NI)0; stars0 = 
(NI)0; { NIM_BOOL LOC64; Ttype294840* t0; LOC64 = (NIM_BOOL)0; LOC64 = scancppgenericslot_536827_839829468(pat0, (&i0), (&idx0), (&stars0)); if (!LOC64) goto LA65; t0 = resolvestarsincpptype_536891_839829468(typ_543704_839829468, idx0, stars0); { TY535289 LOC71; Ropeobj180006* LOC72; if (!(t0 == NIM_NIL)) goto LA69; memset((void*)LOC71, 0, sizeof(LOC71)); LOC72 = (Ropeobj180006*)0; LOC72 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0); add_180482_2381377266(&result0, LOC72); } goto LA67; LA69: ; { Ropeobj180006* LOC74; LOC74 = (Ropeobj180006*)0; LOC74 = gettypedesc_537671_839829468((*p0).module, t0); add_180482_2381377266(&result0, LOC74); } LA67: ; } LA65: ; } break; default: { NI start0; start0 = i0; { while (1) { if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77; { if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80; i0 += ((NI) 1); } goto LA78; LA80: ; { goto LA76; } LA78: ; } LA77: ; } LA76: ; { NimStringDesc* LOC87; if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85; LOC87 = (NimStringDesc*)0; LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1))); add_180487_2381377266(&result0, LOC87); } LA85: ; } break; } } LA2: ; } return result0; } N_NIMCALL(void, fixupcall_541410_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0, Ropeobj180006* callee0, Ropeobj180006* params0) { Ropeobj180006* pl0; TY535289 LOC1; Ropeobj180006* LOC2; Ropeobj180006* LOC3; Ttype294840* typ0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (Ropeobj180006*)0; LOC2 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0); LOC3 = (Ropeobj180006*)0; LOC3 = HEX26_180418_2381377266(callee0, LOC2); pl0 = HEX26_180418_2381377266(LOC3, params0); typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; 
LOC10 = isinvalidreturntype_535548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC10) goto LA11; { TY535289 LOC17; Ropeobj180006* LOC18; if (!!((params0 == NIM_NIL))) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC18 = (Ropeobj180006*)0; LOC18 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0); add_180482_2381377266(&pl0, LOC18); } LA15: ; { NIM_BOOL LOC21; NIM_BOOL LOC23; Ropeobj180006* LOC36; TY535289 LOC37; Ropeobj180006* LOC38; LOC21 = (NIM_BOOL)0; LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0); if (LOC21) goto LA22; LOC23 = (NIM_BOOL)0; LOC23 = leftappearsonrightside_541329_839829468(le0, ri0); LOC21 = !(LOC23); LA22: ; if (!LOC21) goto LA24; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA28; gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } goto LA26; LA28: ; { NIM_BOOL LOC31; NIM_BOOL LOC33; LOC31 = (NIM_BOOL)0; LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0)); if (!(LOC31)) goto LA32; LOC33 = (NIM_BOOL)0; LOC33 = hasnoinit_541383_839829468(ri0); LOC31 = !(LOC33); LA32: ; if (!LOC31) goto LA34; resetloc_540350_839829468(p0, d0); } goto LA26; LA34: ; LA26: ; LOC36 = (Ropeobj180006*)0; LOC36 = addrloc_540204_839829468((*d0)); add_180482_2381377266(&pl0, LOC36); memset((void*)LOC37, 0, sizeof(LOC37)); LOC38 = (Ropeobj180006*)0; LOC38 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0); add_180482_2381377266(&pl0, LOC38); line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0); } goto LA19; LA24: ; { Tloc294816 tmp0; Ropeobj180006* LOC40; TY535289 LOC41; Ropeobj180006* LOC42; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC40 = (Ropeobj180006*)0; LOC40 = addrloc_540204_839829468(tmp0); add_180482_2381377266(&pl0, LOC40); memset((void*)LOC41, 0, sizeof(LOC41)); LOC42 = (Ropeobj180006*)0; LOC42 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0); add_180482_2381377266(&pl0, LOC42); 
line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0); genassignment_541264_839829468(p0, (*d0), tmp0, 0); } LA19: ; } goto LA8; LA11: ; { TY535289 LOC44; Ropeobj180006* LOC45; memset((void*)LOC44, 0, sizeof(LOC44)); LOC45 = (Ropeobj180006*)0; LOC45 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0); add_180482_2381377266(&pl0, LOC45); { NIM_BOOL LOC48; NIM_BOOL LOC49; LOC48 = (NIM_BOOL)0; LOC49 = (NIM_BOOL)0; LOC49 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC49) goto LA50; LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA50: ; LOC48 = LOC49; if (!(LOC48)) goto LA51; LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag294810) 8))&15U)))!=0); LA51: ; if (!LOC48) goto LA52; (*d0).k = ((Tlockind294808) 9); unsureAsgnRef((void**) (&(*d0).r), pl0); (*d0).flags &= ~(((NU16)1) << ((((Tlocflag294810) 8)) % (sizeof(NU16)*8))); } goto LA46; LA52: ; { Tloc294816 list0; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA57; gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA57: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_534273_839829468((&list0), ((Tlockind294808) 9), (*d0).t, ((Tstorageloc294812) 0)); list0.r = pl0; genassignment_541264_839829468(p0, (*d0), list0, 0); } LA46: ; } LA8: ; } goto LA4; LA6: ; { TY535289 LOC60; Ropeobj180006* LOC61; memset((void*)LOC60, 0, sizeof(LOC60)); LOC61 = (Ropeobj180006*)0; LOC61 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0); add_180482_2381377266(&pl0, LOC61); line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0); } LA4: ; } N_NIMCALL(void, geninfixcall_543929_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0) { Tloc294816 op0; Ttype294840* typ_543940_839829468; NI length0; NimStringDesc* pat0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_541283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); typ_543940_839829468 = 
skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_297351_850551059(ri0); pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data; { NimStringDesc* LOC5; if (!!(!((pat0 == NIM_NIL)))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_198185_1689653243(T839829468_498); internalerror_198113_155036129(LOC5); } LA3: ; { NIM_BOOL LOC8; Ropeobj180006* pl0; Ttype294840* typ0; LOC8 = (NIM_BOOL)0; LOC8 = contains_110056_4286263276(pat0, T839829468_500); if (!LOC8) goto LA9; pl0 = genpatterncall_543699_839829468(p0, ri0, pat0, typ_543940_839829468); typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13; { NIM_BOOL LOC17; NIM_BOOL LOC18; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC18) goto LA19; LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA19: ; LOC17 = LOC18; if (!(LOC17)) goto LA20; LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag294810) 8))&15U)))!=0); LA20: ; if (!LOC17) goto LA21; (*d0).k = ((Tlockind294808) 9); unsureAsgnRef((void**) (&(*d0).r), pl0); (*d0).flags &= ~(((NU16)1) << ((((Tlocflag294810) 8)) % (sizeof(NU16)*8))); } goto LA15; LA21: ; { Tloc294816 list0; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA26; gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA26: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_534273_839829468((&list0), ((Tlockind294808) 9), (*d0).t, ((Tstorageloc294812) 0)); list0.r = pl0; genassignment_541264_839829468(p0, (*d0), list0, 0); } LA15: ; } goto LA11; LA13: ; { TY535289 LOC29; Ropeobj180006* LOC30; memset((void*)LOC29, 0, sizeof(LOC29)); LOC30 = (Ropeobj180006*)0; LOC30 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0); add_180482_2381377266(&pl0, LOC30); line_534690_839829468(p0, 
((Tcprocsection531011) 2), pl0); } LA11: ; } goto LA6; LA9: ; { Ropeobj180006* pl0; Ropeobj180006* params0; pl0 = NIM_NIL; { NI LOC34; Ropeobj180006* LOC37; LOC34 = (NI)0; LOC34 = len_295081_850551059(ri0); if (!(((NI) 1) < LOC34)) goto LA35; LOC37 = (Ropeobj180006*)0; LOC37 = genthisarg_543475_839829468(p0, ri0, ((NI) 1), typ_543940_839829468); add_180482_2381377266(&pl0, LOC37); } LA35: ; add_180482_2381377266(&pl0, op0.r); params0 = (Ropeobj180006*)0; { NI i_544425_839829468; NI HEX3Atmp_544609_839829468; NI res_544612_839829468; i_544425_839829468 = (NI)0; HEX3Atmp_544609_839829468 = (NI)0; HEX3Atmp_544609_839829468 = (NI)(length0 - ((NI) 1)); res_544612_839829468 = ((NI) 2); { while (1) { Ropeobj180006* LOC47; if (!(res_544612_839829468 <= HEX3Atmp_544609_839829468)) goto LA40; i_544425_839829468 = res_544612_839829468; { TY535289 LOC45; Ropeobj180006* LOC46; if (!!((params0 == NIM_NIL))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (Ropeobj180006*)0; LOC46 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0); add_180482_2381377266(&params0, LOC46); } LA43: ; LOC47 = (Ropeobj180006*)0; LOC47 = genotherarg_541277_839829468(p0, ri0, i_544425_839829468, typ_543940_839829468); add_180482_2381377266(&params0, LOC47); res_544612_839829468 += ((NI) 1); } LA40: ; } } fixupcall_541410_839829468(p0, le0, ri0, d0, pl0, params0); } LA6: ; } N_NIMCALL(void, gennamedparamcall_544616_839829468)(Tcproc531021* p0, Tnode294802* ri0, Tloc294816* d0) { Tloc294816 op0; Ropeobj180006* pl0; TY535289 LOC1; Ttype294840* typ0; NI length0; NimStringDesc* pat0; NI start0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_541283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); memset((void*)LOC1, 0, sizeof(LOC1)); pl0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0); typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_297351_850551059(ri0); pat0 = 
(*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data; { NimStringDesc* LOC6; if (!!(!((pat0 == NIM_NIL)))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = HEX24_198185_1689653243(T839829468_507); internalerror_198113_155036129(LOC6); } LA4: ; start0 = ((NI) 3); { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = contains_110046_4286263276(pat0, 32); if (!LOC9) goto LA10; start0 = ((NI) 1); add_180482_2381377266(&pl0, op0.r); { TY535289 LOC16; Ropeobj180006* LOC17; Ropeobj180006* LOC18; if (!(((NI) 1) < length0)) goto LA14; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = (Ropeobj180006*)0; LOC17 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0); add_180482_2381377266(&pl0, LOC17); LOC18 = (Ropeobj180006*)0; LOC18 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0); add_180482_2381377266(&pl0, LOC18); start0 = ((NI) 2); } LA14: ; } goto LA7; LA10: ; { { Ropeobj180006* LOC24; TY535289 LOC25; Ropeobj180006* LOC26; if (!(((NI) 1) < length0)) goto LA22; LOC24 = (Ropeobj180006*)0; LOC24 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0); add_180482_2381377266(&pl0, LOC24); memset((void*)LOC25, 0, sizeof(LOC25)); LOC26 = (Ropeobj180006*)0; LOC26 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0); add_180482_2381377266(&pl0, LOC26); } LA22: ; add_180482_2381377266(&pl0, op0.r); { TY535289 LOC31; Ropeobj180006* LOC32; Ropeobj180006* LOC33; if (!(((NI) 2) < length0)) goto LA29; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj180006*)0; LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0); add_180482_2381377266(&pl0, LOC32); LOC33 = (Ropeobj180006*)0; LOC33 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0); add_180482_2381377266(&pl0, LOC33); } LA29: ; } 
LA7: ; { NI i_545051_839829468; NI HEX3Atmp_545617_839829468; NI res_545620_839829468; i_545051_839829468 = (NI)0; HEX3Atmp_545617_839829468 = (NI)0; HEX3Atmp_545617_839829468 = (NI)(length0 - ((NI) 1)); res_545620_839829468 = start0; { while (1) { Tsym294834* param0; TY535289 LOC42; Ropeobj180006* LOC43; TY535289 LOC44; Ropeobj180006* LOC45; Ropeobj180006* LOC46; if (!(res_545620_839829468 <= HEX3Atmp_545617_839829468)) goto LA36; i_545051_839829468 = res_545620_839829468; { NI LOC39; LOC39 = (NI)0; LOC39 = sonslen_297327_850551059(typ0); if (!(LOC39 <= i_545051_839829468)) goto LA40; internalerror_198100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508)); } LA40: ; param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_545051_839829468]).kindU.S4.sym; memset((void*)LOC42, 0, sizeof(LOC42)); LOC43 = (Ropeobj180006*)0; LOC43 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0); add_180482_2381377266(&pl0, LOC43); add_180487_2381377266(&pl0, (*(*param0).name).s); memset((void*)LOC44, 0, sizeof(LOC44)); LOC45 = (Ropeobj180006*)0; LOC45 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0); add_180482_2381377266(&pl0, LOC45); LOC46 = (Ropeobj180006*)0; LOC46 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[i_545051_839829468], param0, ri0); add_180482_2381377266(&pl0, LOC46); res_545620_839829468 += ((NI) 1); } LA36: ; } } { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49; { NIM_BOOL LOC53; LOC53 = (NIM_BOOL)0; LOC53 = isinvalidreturntype_535548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC53) goto LA54; { NI LOC58; TY535289 LOC61; Ropeobj180006* LOC62; LOC58 = (NI)0; LOC58 = sonslen_297351_850551059(ri0); if (!(((NI) 1) < LOC58)) goto LA59; memset((void*)LOC61, 0, sizeof(LOC61)); LOC62 = (Ropeobj180006*)0; LOC62 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0); add_180482_2381377266(&pl0, LOC62); } LA59: ; { TY535289 LOC71; Ropeobj180006* LOC72; Ropeobj180006* LOC73; TY535289 LOC74; 
Ropeobj180006* LOC75; if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA69; gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } LA69: ; memset((void*)LOC71, 0, sizeof(LOC71)); LOC72 = (Ropeobj180006*)0; LOC72 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0); add_180482_2381377266(&pl0, LOC72); LOC73 = (Ropeobj180006*)0; LOC73 = addrloc_540204_839829468((*d0)); add_180482_2381377266(&pl0, LOC73); memset((void*)LOC74, 0, sizeof(LOC74)); LOC75 = (Ropeobj180006*)0; LOC75 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0); add_180482_2381377266(&pl0, LOC75); line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0); } goto LA63; LA65: ; { Tloc294816 tmp0; Ropeobj180006* LOC77; TY535289 LOC78; Ropeobj180006* LOC79; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC77 = (Ropeobj180006*)0; LOC77 = addrloc_540204_839829468(tmp0); add_180482_2381377266(&pl0, LOC77); memset((void*)LOC78, 0, sizeof(LOC78)); LOC79 = (Ropeobj180006*)0; LOC79 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0); add_180482_2381377266(&pl0, LOC79); line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0); genassignment_541264_839829468(p0, (*d0), tmp0, 0); } LA63: ; } goto LA51; LA54: ; { TY535289 LOC81; Ropeobj180006* LOC82; Tloc294816 list0; memset((void*)LOC81, 0, sizeof(LOC81)); LOC82 = (Ropeobj180006*)0; LOC82 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0); add_180482_2381377266(&pl0, LOC82); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA85; gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA85: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_534273_839829468((&list0), ((Tlockind294808) 9), NIM_NIL, ((Tstorageloc294812) 0)); list0.r = pl0; genassignment_541264_839829468(p0, (*d0), list0, 0); } LA51: ; } goto LA47; LA49: ; { 
TY535289 LOC88; Ropeobj180006* LOC89; memset((void*)LOC88, 0, sizeof(LOC88)); LOC89 = (Ropeobj180006*)0; LOC89 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0); add_180482_2381377266(&pl0, LOC89); line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0); } LA47: ; } N_NIMCALL(void, genprefixcall_541960_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0) { Tloc294816 op0; Ropeobj180006* params0; Ttype294840* typ0; NI length0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_541283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); params0 = (Ropeobj180006*)0; typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_297351_850551059(ri0); { NI i_542213_839829468; NI HEX3Atmp_542445_839829468; NI res_542448_839829468; i_542213_839829468 = (NI)0; HEX3Atmp_542445_839829468 = (NI)0; HEX3Atmp_542445_839829468 = (NI)(length0 - ((NI) 1)); res_542448_839829468 = ((NI) 1); { while (1) { if (!(res_542448_839829468 <= HEX3Atmp_542445_839829468)) goto LA3; i_542213_839829468 = res_542448_839829468; { NI LOC6; Tnode294802* paramtype0; LOC6 = (NI)0; LOC6 = sonslen_297327_850551059(typ0); if (!(i_542213_839829468 < LOC6)) goto LA7; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_542213_839829468]; { NIM_BOOL LOC11; Ropeobj180006* LOC20; LOC11 = (NIM_BOOL)0; LOC11 = iscompiletimeonly_330706_3876443242((*paramtype0).typ); if (!!(LOC11)) goto LA12; { TY535289 LOC18; Ropeobj180006* LOC19; if (!!((params0 == NIM_NIL))) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (Ropeobj180006*)0; LOC19 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0); add_180482_2381377266(&params0, LOC19); } LA16: ; LOC20 = (Ropeobj180006*)0; LOC20 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[i_542213_839829468], (*paramtype0).kindU.S4.sym, ri0); add_180482_2381377266(&params0, LOC20); } LA12: ; } goto LA4; LA7: ; { Ropeobj180006* LOC28; { 
TY535289 LOC26; Ropeobj180006* LOC27; if (!!((params0 == NIM_NIL))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC27 = (Ropeobj180006*)0; LOC27 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0); add_180482_2381377266(&params0, LOC27); } LA24: ; LOC28 = (Ropeobj180006*)0; LOC28 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[i_542213_839829468]); add_180482_2381377266(&params0, LOC28); } LA4: ; res_542448_839829468 += ((NI) 1); } LA3: ; } } fixupcall_541410_839829468(p0, le0, ri0, d0, op0.r, params0); } static N_INLINE(void, poststmtactions_534942_839829468)(Tcproc531021* p0) { Ropeobj180006** LOC1; LOC1 = (Ropeobj180006**)0; LOC1 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180482_2381377266(LOC1, (*(*p0).module).injectstmt); } N_NIMCALL(void, gencall_545632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { { Ttype294840* LOC3; LOC3 = (Ttype294840*)0; LOC3 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, 2048); if (!((*LOC3).callconv == ((Tcallingconvention294002) 8))) goto LA4; genclosurecall_542452_839829468(p0, NIM_NIL, e0, d0); } goto LA1; LA4: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)); if (!(LOC7)) goto LA8; LOC7 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; geninfixcall_543929_839829468(p0, NIM_NIL, e0, d0); } goto LA1; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)); if (!(LOC12)) goto LA13; LOC12 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 28))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; gennamedparamcall_544616_839829468(p0, e0, d0); } goto LA1; LA14: ; { genprefixcall_541960_839829468(p0, NIM_NIL, e0, d0); } LA1: ; poststmtactions_534942_839829468(p0); } N_NIMCALL(void, 
genreset_556731_839829468)(Tcproc531021* p0, Tnode294802* n0) { Tloc294816 a0; TY534811 LOC1; Ttype294840* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = addrloc_540204_839829468(a0); LOC2 = (Ttype294840*)0; LOC2 = skiptypes_298099_850551059(a0.t, IL64(211106242013440)); LOC1[1] = gentypeinfo_537941_839829468((*p0).module, LOC2); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_496), LOC1, 2); } N_NIMCALL(void, genecho_556369_839829468)(Tcproc531021* p0, Tnode294802* n0) { NIM_BOOL LOC6; Ropeobj180006* args0; Tloc294816 a0; TY534811 LOC18; NimStringDesc* LOC19; NI LOC20; NimStringDesc* LOC21; TY535289 LOC22; { NimStringDesc* LOC5; if (!!(((*n0).kind == ((Tnodekind294020) 41)))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_198185_1689653243(T839829468_512); internalerror_198113_155036129(LOC5); } LA3: ; LOC6 = (NIM_BOOL)0; LOC6 = includestr_148249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513)); args0 = NIM_NIL; memset((void*)(&a0), 0, sizeof(a0)); { NI i_556404_839829468; NI HEX3Atmp_556431_839829468; NI LOC8; NI res_556434_839829468; i_556404_839829468 = (NI)0; HEX3Atmp_556431_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = len_295081_850551059(n0); HEX3Atmp_556431_839829468 = (NI)(LOC8 - ((NI) 1)); res_556434_839829468 = ((NI) 0); { while (1) { if (!(res_556434_839829468 <= HEX3Atmp_556431_839829468)) goto LA10; i_556404_839829468 = res_556434_839829468; { Tnode294802* LOC13; LOC13 = (Tnode294802*)0; LOC13 = skipconv_330882_3876443242((*n0).kindU.S6.sons->data[i_556404_839829468]); if (!((*LOC13).kind == ((Tnodekind294020) 23))) goto LA14; add_180487_2381377266(&args0, ((NimStringDesc*) &T839829468_514)); } goto LA11; LA14: ; { TY180507 LOC17; initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[i_556404_839829468], (&a0)); memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = 
rdloc_540188_839829468(a0); addf_181205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1); } LA11: ; res_556434_839829468 += ((NI) 1); } LA10: ; } } memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (NimStringDesc*)0; LOC20 = (NI)0; LOC20 = len_295081_850551059(n0); LOC21 = (NimStringDesc*)0; LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20))); LOC19 = rawNewString(LOC21->Sup.len + tnl_178644_4151366050->Sup.len + 0); appendString(LOC19, LOC21); appendString(LOC19, tnl_178644_4151366050); LOC18[0] = makecstring_193638_155036129(LOC19); LOC18[1] = args0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2); memset((void*)LOC22, 0, sizeof(LOC22)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0); } N_NIMCALL(void, genseqconstr_557004_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) { Tloc294816 arr0; NI LOC5; Ropeobj180006* LOC6; memset((void*)(&arr0), 0, sizeof(arr0)); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA3; gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA3: ; LOC5 = (NI)0; LOC5 = sonslen_297351_850551059(t0); LOC6 = (Ropeobj180006*)0; LOC6 = intliteral_541270_839829468(((NI64) (LOC5))); gennewseqaux_556795_839829468(p0, (*d0), LOC6); { NI i_557031_839829468; NI HEX3Atmp_557039_839829468; NI LOC8; NI res_557042_839829468; i_557031_839829468 = (NI)0; HEX3Atmp_557039_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = sonslen_297351_850551059(t0); HEX3Atmp_557039_839829468 = (NI)(LOC8 - ((NI) 1)); res_557042_839829468 = ((NI) 0); { while (1) { Ttype294840* LOC11; Ttype294840* LOC12; TY534811 LOC13; if (!(res_557042_839829468 <= HEX3Atmp_557039_839829468)) goto LA10; i_557031_839829468 = res_557042_839829468; LOC11 = (Ttype294840*)0; LOC11 = skiptypes_298099_850551059((*t0).typ, IL64(211106232576256)); LOC12 = (Ttype294840*)0; LOC12 = elemtype_322394_3876443242(LOC11); initloc_534273_839829468((&arr0), 
((Tlockind294808) 6), LOC12, ((Tstorageloc294812) 3)); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = rdloc_540188_839829468((*d0)); LOC13[1] = intliteral_541270_839829468(((NI64) (i_557031_839829468))); arr0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2); arr0.s = ((Tstorageloc294812) 3); expr_541248_839829468(p0, (*t0).kindU.S6.sons->data[i_557031_839829468], (&arr0)); res_557042_839829468 += ((NI) 1); } LA10: ; } } gcusage_556439_839829468(t0); } N_NIMCALL(void, genarrtoseq_557046_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) { Tloc294816 elem0; Tloc294816 a0; Tloc294816 arr0; NI L0; NI64 LOC9; Ropeobj180006* LOC10; { memset((void*)(&elem0), 0, sizeof(elem0)); memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&arr0), 0, sizeof(arr0)); { if (!((*t0).kind == ((Tnodekind294020) 41))) goto LA3; asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ); genseqconstr_557004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0); goto BeforeRet; } LA3: ; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA7; gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA7: ; LOC9 = (NI64)0; LOC9 = lengthord_322007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ); L0 = ((NI) (LOC9)); LOC10 = (Ropeobj180006*)0; LOC10 = intliteral_541270_839829468(((NI64) (L0))); gennewseqaux_556795_839829468(p0, (*d0), LOC10); initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0)); { NI i_557090_839829468; NI HEX3Atmp_557103_839829468; NI res_557106_839829468; i_557090_839829468 = (NI)0; HEX3Atmp_557103_839829468 = (NI)0; HEX3Atmp_557103_839829468 = (NI)(L0 - ((NI) 1)); res_557106_839829468 = ((NI) 0); { while (1) { Ttype294840* LOC14; Ttype294840* LOC15; TY534811 LOC16; Ttype294840* LOC17; Ttype294840* LOC18; TY534811 LOC19; if (!(res_557106_839829468 <= HEX3Atmp_557103_839829468)) goto LA13; i_557090_839829468 = res_557106_839829468; LOC14 = (Ttype294840*)0; LOC14 = 
skiptypes_298099_850551059((*t0).typ, IL64(211106232576256)); LOC15 = (Ttype294840*)0; LOC15 = elemtype_322394_3876443242(LOC14); initloc_534273_839829468((&elem0), ((Tlockind294808) 6), LOC15, ((Tstorageloc294812) 3)); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_540188_839829468((*d0)); LOC16[1] = intliteral_541270_839829468(((NI64) (i_557090_839829468))); elem0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2); elem0.s = ((Tstorageloc294812) 3); LOC17 = (Ttype294840*)0; LOC17 = skiptypes_298099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256)); LOC18 = (Ttype294840*)0; LOC18 = elemtype_322394_3876443242(LOC17); initloc_534273_839829468((&arr0), ((Tlockind294808) 6), LOC18, a0.s); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_540188_839829468(a0); LOC19[1] = intliteral_541270_839829468(((NI64) (i_557090_839829468))); arr0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2); genassignment_541264_839829468(p0, elem0, arr0, 3); res_557106_839829468 += ((NI) 1); } LA13: ; } } }BeforeRet: ; } N_NIMCALL(void, gendeepcopy_552374_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0) { Ttype294840* ty0; ty0 = skiptypes_298099_850551059(dest0.t, IL64(211106242013440)); switch ((*ty0).kind) { case ((Ttypekind294244) 21): case ((Ttypekind294244) 22): case ((Ttypekind294244) 25): case ((Ttypekind294244) 18): case ((Ttypekind294244) 17): case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): { TY537238 LOC2; memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = addrloc_540204_839829468(dest0); LOC2[1] = addrloc_540204_839829468(src0); LOC2[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3); } break; case ((Ttypekind294244) 24): case ((Ttypekind294244) 28): { TY537238 LOC4; memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = addrloc_540204_839829468(dest0); 
LOC4[1] = rdloc_540188_839829468(src0); LOC4[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3); } break; case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): { TY537238 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = addrloc_540204_839829468(dest0); LOC6[1] = addrloc_540204_839829468(src0); LOC6[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3); } break; case ((Ttypekind294244) 19): { { Tctypekind531007 LOC10; TY537238 LOC13; NI64 LOC14; LOC10 = (Tctypekind531007)0; LOC10 = maptype_535393_839829468(ty0); if (!(LOC10 == ((Tctypekind531007) 17))) goto LA11; usestringh_534345_839829468((*p0).module); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = rdloc_540188_839829468(dest0); LOC13[1] = rdloc_540188_839829468(src0); LOC14 = (NI64)0; LOC14 = getsize_322135_3876443242(dest0.t); LOC13[2] = rope_180401_2381377266(LOC14); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3); } goto LA8; LA11: ; { TY534811 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_540188_839829468(dest0); LOC16[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2); } LA8: ; } break; case ((Ttypekind294244) 26): case ((Ttypekind294244) 2): case ((Ttypekind294244) 1): case ((Ttypekind294244) 14): case ((Ttypekind294244) 29): case ((Ttypekind294244) 31) ... 
((Ttypekind294244) 44): case ((Ttypekind294244) 20): case ((Ttypekind294244) 23): { TY534811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_540188_839829468(dest0); LOC18[1] = rdloc_540188_839829468(src0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2); } break; default: { NimStringDesc* LOC20; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI294244))->Sup.len + 13); appendString(LOC20, ((NimStringDesc*) &T839829468_522)); appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI294244))); internalerror_198113_155036129(LOC20); } break; } } N_NIMCALL(void, genmagicexpr_559033_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) { switch (op0) { case ((Tmagic294524) 127): case ((Tmagic294524) 126): { genandor_556311_839829468(p0, e0, d0, op0); } break; case ((Tmagic294524) 99) ... ((Tmagic294524) 117): { unaryarith_554646_839829468(p0, e0, d0, op0); } break; case ((Tmagic294524) 96) ... ((Tmagic294524) 98): { unaryarithoverflow_553633_839829468(p0, e0, d0, op0); } break; case ((Tmagic294524) 52) ... ((Tmagic294524) 55): { binaryfloatarith_558728_839829468(p0, e0, d0, op0); } break; case ((Tmagic294524) 56) ... ((Tmagic294524) 93): { binaryarith_553819_839829468(p0, e0, d0, op0); } break; case ((Tmagic294524) 95): { geneqproc_554214_839829468(p0, e0, d0); } break; case ((Tmagic294524) 45) ... 
((Tmagic294524) 51): { binaryarithoverflow_553262_839829468(p0, e0, d0, op0); } break; case ((Tmagic294524) 149): { genrepr_557339_839829468(p0, e0, d0); } break; case ((Tmagic294524) 259): { gengettypeinfo_557383_839829468(p0, e0, d0); } break; case ((Tmagic294524) 156): { genswap_557638_839829468(p0, e0, d0); } break; case ((Tmagic294524) 25): { { if (!!((((*p0).options &(1U<<((NU)(((Toption171009) 5))&31U)))!=0))) goto LA14; unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385)); } goto LA12; LA14: ; { unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386)); } LA12: ; } break; case ((Tmagic294524) 26): case ((Tmagic294524) 27): { Ttype294840* underlying0; underlying0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = !((((*p0).options &(1U<<((NU)(((Toption171009) 5))&31U)))!=0)); if (LOC20) goto LA21; LOC20 = ((*underlying0).kind >= ((Ttypekind294244) 40) && (*underlying0).kind <= ((Ttypekind294244) 44)); LA21: ; if (!LOC20) goto LA22; binarystmt_552501_839829468(p0, e0, d0, opr_559050_839829468[(op0)- 26]); } goto LA18; LA22: ; { Tloc294816 a0; Tloc294816 b0; Ttype294840* ranged0; Ropeobj180006* res0; NimStringDesc* LOC25; TY534811 LOC31; Ropeobj180006* LOC32; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); ranged0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656); LOC25 = (NimStringDesc*)0; { if (!((*underlying0).kind == ((Ttypekind294244) 35))) goto LA28; LOC25 = copyString(fun64_559055_839829468[(op0)- 26]); } goto LA26; LA28: ; { LOC25 = copyString(fun_559060_839829468[(op0)- 26]); } LA26: ; res0 = binaryarithoverflowraw_553235_839829468(p0, ranged0, a0, b0, LOC25); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = 
gettypedesc_537671_839829468((*p0).module, ranged0); LOC31[1] = res0; LOC32 = (Ropeobj180006*)0; LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2); putintodest_552468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc294812) 0)); } LA18: ; } break; case ((Tmagic294524) 138): { genstrconcat_556452_839829468(p0, e0, d0); } break; case ((Tmagic294524) 144): { binarystmt_552501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394)); } break; case ((Tmagic294524) 145): { genstrappend_556554_839829468(p0, e0, d0); } break; case ((Tmagic294524) 146): { genseqelemappend_556683_839829468(p0, e0, d0); } break; case ((Tmagic294524) 128): { genstrequals_558666_839829468(p0, e0, d0); } break; case ((Tmagic294524) 129): { binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402)); } break; case ((Tmagic294524) 130): { binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403)); } break; case ((Tmagic294524) 157): { genisnil_554620_839829468(p0, e0, d0); } break; case ((Tmagic294524) 120): { gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406)); } break; case ((Tmagic294524) 121): { gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407)); } break; case ((Tmagic294524) 119): { gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408)); } break; case ((Tmagic294524) 118): { gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409)); } break; case ((Tmagic294524) 122): { gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410)); } break; case ((Tmagic294524) 123): { gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411)); } break; case ((Tmagic294524) 124): { expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Tmagic294524) 125): { genrepr_557339_839829468(p0, e0, d0); } break; case ((Tmagic294524) 12): { genof_557331_839829468(p0, e0, d0); } break; case ((Tmagic294524) 29): { 
gennew_556782_839829468(p0, e0); } break; case ((Tmagic294524) 30): { gennewfinalize_557110_839829468(p0, e0); } break; case ((Tmagic294524) 31): { gennewseq_556824_839829468(p0, e0); } break; case ((Tmagic294524) 32): { gennewseqofcap_556836_839829468(p0, e0, d0); } break; case ((Tmagic294524) 9): { Ttype294840* t0; TY180507 LOC55; Ropeobj180006* LOC56; t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256); memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = gettypedesc_537671_839829468((*p0).module, t0); LOC56 = (Ropeobj180006*)0; LOC56 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc294812) 0)); } break; case ((Tmagic294524) 42): { gensomecast_558480_839829468(p0, e0, d0); } break; case ((Tmagic294524) 28): { genord_558474_839829468(p0, e0, d0); } break; case ((Tmagic294524) 35): case ((Tmagic294524) 8): case ((Tmagic294524) 34): case ((Tmagic294524) 36): case ((Tmagic294524) 33): { genarraylen_557415_839829468(p0, e0, d0, op0); } break; case ((Tmagic294524) 37): case ((Tmagic294524) 38): { { NIM_BOOL LOC63; LOC63 = (NIM_BOOL)0; LOC63 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC63) goto LA64; LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA64: ; if (!!(LOC63)) goto LA65; unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440)); } goto LA61; LA65: ; { unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441)); } LA61: ; } break; case ((Tmagic294524) 43): { unarystmt_552527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443)); } break; case ((Tmagic294524) 44): { unarystmt_552527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444)); } break; case ((Tmagic294524) 151): { gensetlengthstr_557632_839829468(p0, e0, d0); } break; case ((Tmagic294524) 152): { gensetlengthseq_557500_839829468(p0, e0, d0); } break; case ((Tmagic294524) 39): case 
((Tmagic294524) 40): case ((Tmagic294524) 41): case ((Tmagic294524) 133): case ((Tmagic294524) 132): case ((Tmagic294524) 131): case ((Tmagic294524) 134): case ((Tmagic294524) 135): case ((Tmagic294524) 136): case ((Tmagic294524) 148): { gensetop_558419_839829468(p0, e0, d0, op0); } break; case ((Tmagic294524) 161): case ((Tmagic294524) 162): case ((Tmagic294524) 159): case ((Tmagic294524) 160): case ((Tmagic294524) 150): case ((Tmagic294524) 163): { Tsym294834* opr0; opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NimStringDesc* LOC78; Ropeobj180006* LOC79; if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0))) goto LA76; LOC78 = (NimStringDesc*)0; LOC78 = HEX24_180856_2381377266((*opr0).loc.r); LOC79 = (Ropeobj180006*)0; LOC79 = cgsym_534403_839829468((*p0).module, LOC78); } LA76: ; gencall_545632_839829468(p0, e0, d0); } break; case ((Tmagic294524) 164): { genreset_556731_839829468(p0, e0); } break; case ((Tmagic294524) 17): { Tnode294802* LOC82; Tnode294802* LOC83; LOC82 = (Tnode294802*)0; LOC82 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1)); LOC83 = (Tnode294802*)0; LOC83 = skipconv_330882_3876443242(LOC82); genecho_556369_839829468(p0, LOC83); } break; case ((Tmagic294524) 158): { genarrtoseq_557046_839829468(p0, e0, d0); } break; case ((Tmagic294524) 223) ... ((Tmagic294524) 257): case ((Tmagic294524) 19) ... 
((Tmagic294524) 24): { localerror_198080_155036129((*e0).info, ((Tmsgkind193002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s); } break; case ((Tmagic294524) 208): { Tnode294802* n0; n0 = wrapprocforspawn_437501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL); expr_541248_839829468(p0, n0, d0); } break; case ((Tmagic294524) 155): { Tnode294802* n0; n0 = liftparallel_480822_1773027539((*(*p0).module).module, e0); expr_541248_839829468(p0, n0, d0); } break; case ((Tmagic294524) 209): { Tloc294816 a0; Tloc294816 b0; Tnode294802* x0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); { Tnode294802* LOC91; Tnode294802* LOC94; LOC91 = (Tnode294802*)0; LOC91 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1)); if (!((*LOC91).kind == ((Tnodekind294020) 63) || (*LOC91).kind == ((Tnodekind294020) 64))) goto LA92; LOC94 = (Tnode294802*)0; LOC94 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1)); x0 = HEX5BHEX5D_295238_850551059(LOC94, ((NI) 0)); } goto LA89; LA92: ; { x0 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1)); } LA89: ; initlocexpr_541283_839829468(p0, x0, (&a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); gendeepcopy_552374_839829468(p0, a0, b0); } break; case ((Tmagic294524) 140): case ((Tmagic294524) 94): { gencall_545632_839829468(p0, e0, d0); } break; default: { NimStringDesc* LOC98; LOC98 = (NimStringDesc*)0; LOC98 = rawNewString(reprEnum((NI)op0, (&NTI294524))->Sup.len + 14); appendString(LOC98, ((NimStringDesc*) &T839829468_523)); appendString(LOC98, reprEnum((NI)op0, (&NTI294524))); internalerror_198100_155036129((*e0).info, LOC98); } break; } } N_NIMCALL(Ropeobj180006*, gensetnode_551664_839829468)(Tcproc531021* p0, Tnode294802* n0) { Ropeobj180006* result0; Tbitset341004* cs0; NI size0; NI64 LOC1; result0 = (Ropeobj180006*)0; cs0 = (Tbitset341004*)0; LOC1 = (NI64)0; LOC1 = getsize_322135_3876443242((*n0).typ); size0 = ((NI) (LOC1)); tobitset_342001_452470228(n0, (&cs0)); { 
NI id0; Ropeobj180006* LOC6; if (!(((NI) 8) < size0)) goto LA4; id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC6 = (Ropeobj180006*)0; LOC6 = rope_180401_2381377266(((NI64) (id0))); result0 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC6); { TY537238 LOC11; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_537671_839829468((*p0).module, (*n0).typ); LOC11[1] = result0; LOC11[2] = genrawsetdata_551629_839829468(cs0, size0); addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3); } LA9: ; } goto LA2; LA4: ; { result0 = genrawsetdata_551629_839829468(cs0, size0); } LA2: ; return result0; } N_NIMCALL(void, gensetconstr_559496_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; Tloc294816 idx0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&idx0), 0, sizeof(idx0)); { Ropeobj180006* LOC5; if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag294427) 4))&15U)))!=0)) goto LA3; LOC5 = (Ropeobj180006*)0; LOC5 = gensetnode_551664_839829468(p0, e0); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc294812) 0)); } goto LA1; LA3: ; { { if (!((*d0).k == ((Tlockind294808) 0))) goto LA9; gettemp_539032_839829468(p0, (*e0).typ, d0, NIM_FALSE); } LA9: ; { NI64 LOC13; TY180507 LOC16; LOC13 = (NI64)0; LOC13 = getsize_322135_3876443242((*e0).typ); if (!(IL64(8) < LOC13)) goto LA14; usestringh_534345_839829468((*p0).module); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_540188_839829468((*d0)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1); { NI i_559537_839829468; NI HEX3Atmp_559603_839829468; NI LOC18; NI res_559606_839829468; i_559537_839829468 = (NI)0; HEX3Atmp_559603_839829468 = (NI)0; 
LOC18 = (NI)0; LOC18 = sonslen_297351_850551059(e0); HEX3Atmp_559603_839829468 = (NI)(LOC18 - ((NI) 1)); res_559606_839829468 = ((NI) 0); { while (1) { if (!(res_559606_839829468 <= HEX3Atmp_559603_839829468)) goto LA20; i_559537_839829468 = res_559606_839829468; { Ttype294840* LOC25; TY537235 LOC26; if (!((*(*e0).kindU.S6.sons->data[i_559537_839829468]).kind == ((Tnodekind294020) 44))) goto LA23; LOC25 = (Ttype294840*)0; LOC25 = getsystype_340150_3937434831(((Ttypekind294244) 31)); gettemp_539032_839829468(p0, LOC25, (&idx0), NIM_FALSE); initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_559537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_559537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0)); memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rdloc_540188_839829468(idx0); LOC26[1] = rdloc_540188_839829468((*d0)); LOC26[2] = rdsetelemloc_557662_839829468(a0, (*e0).typ); LOC26[3] = rdsetelemloc_557662_839829468(b0, (*e0).typ); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4); } goto LA21; LA23: ; { TY534811 LOC28; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[i_559537_839829468], (&a0)); memset((void*)LOC28, 0, sizeof(LOC28)); LOC28[0] = rdloc_540188_839829468((*d0)); LOC28[1] = rdsetelemloc_557662_839829468(a0, (*e0).typ); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2); } LA21: ; res_559606_839829468 += ((NI) 1); } LA20: ; } } } goto LA11; LA14: ; { NimStringDesc* ts0; NimStringDesc* LOC30; NI64 LOC31; NimStringDesc* LOC32; TY180507 LOC33; LOC30 = (NimStringDesc*)0; LOC31 = (NI64)0; LOC31 = getsize_322135_3876443242((*e0).typ); LOC32 = (NimStringDesc*)0; LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8))); LOC30 = rawNewString(LOC32->Sup.len + 2); appendString(LOC30, ((NimStringDesc*) &T839829468_45)); appendString(LOC30, LOC32); ts0 = LOC30; memset((void*)LOC33, 0, 
sizeof(LOC33)); LOC33[0] = rdloc_540188_839829468((*d0)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1); { NI i_559575_839829468; NI HEX3Atmp_559611_839829468; NI LOC35; NI res_559614_839829468; i_559575_839829468 = (NI)0; HEX3Atmp_559611_839829468 = (NI)0; LOC35 = (NI)0; LOC35 = sonslen_297351_850551059(e0); HEX3Atmp_559611_839829468 = (NI)(LOC35 - ((NI) 1)); res_559614_839829468 = ((NI) 0); { while (1) { if (!(res_559614_839829468 <= HEX3Atmp_559611_839829468)) goto LA37; i_559575_839829468 = res_559614_839829468; { Ttype294840* LOC42; NimStringDesc* LOC43; TY537235 LOC44; if (!((*(*e0).kindU.S6.sons->data[i_559575_839829468]).kind == ((Tnodekind294020) 44))) goto LA40; LOC42 = (Ttype294840*)0; LOC42 = getsystype_340150_3937434831(((Ttypekind294244) 31)); gettemp_539032_839829468(p0, LOC42, (&idx0), NIM_FALSE); initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_559575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_559575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0)); LOC43 = (NimStringDesc*)0; LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68); appendString(LOC43, ((NimStringDesc*) &T839829468_528)); appendString(LOC43, ts0); appendString(LOC43, ((NimStringDesc*) &T839829468_529)); appendString(LOC43, ts0); appendString(LOC43, ((NimStringDesc*) &T839829468_454)); memset((void*)LOC44, 0, sizeof(LOC44)); LOC44[0] = rdloc_540188_839829468(idx0); LOC44[1] = rdloc_540188_839829468((*d0)); LOC44[2] = rdsetelemloc_557662_839829468(a0, (*e0).typ); LOC44[3] = rdsetelemloc_557662_839829468(b0, (*e0).typ); linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC43, LOC44, 4); } goto LA38; LA40: ; { NimStringDesc* LOC46; TY534811 LOC47; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[i_559575_839829468], (&a0)); LOC46 = (NimStringDesc*)0; LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36); appendString(LOC46, 
((NimStringDesc*) &T839829468_530)); appendString(LOC46, ts0); appendString(LOC46, ((NimStringDesc*) &T839829468_531)); appendString(LOC46, ts0); appendString(LOC46, ((NimStringDesc*) &T839829468_454)); memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = rdloc_540188_839829468((*d0)); LOC47[1] = rdsetelemloc_557662_839829468(a0, (*e0).typ); linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC46, LOC47, 2); } LA38: ; res_559614_839829468 += ((NI) 1); } LA37: ; } } } LA11: ; } LA1: ; } N_NIMCALL(void, exprcomplexconst_560684_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { Ttype294840* t0; Ropeobj180006* LOC1; NI id0; Ropeobj180006* tmp0; Ropeobj180006* LOC2; t0 = getuniquetype_530640_2036603609((*n0).typ); LOC1 = (Ropeobj180006*)0; LOC1 = gettypedesc_537671_839829468((*p0).module, t0); id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC2 = (Ropeobj180006*)0; LOC2 = rope_180401_2381377266(((NI64) (id0))); tmp0 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC2); { TY537238 LOC7; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_537671_839829468((*p0).module, t0); LOC7[1] = tmp0; LOC7[2] = genconstexpr_556849_839829468(p0, n0); addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3); } LA5: ; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA10; fillloc_534282_839829468(d0, ((Tlockind294808) 8), t0, tmp0, ((Tstorageloc294812) 1)); } goto LA8; LA10: ; { putdataintodest_552436_839829468(p0, d0, t0, tmp0); { if (!!(((*t0).kind == ((Ttypekind294244) 24) || (*t0).kind == ((Ttypekind294244) 28)))) goto LA15; (*d0).s = ((Tstorageloc294812) 1); } LA15: ; } LA8: ; } N_NIMCALL(NIM_BOOL, handleconstexpr_556853_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { NIM_BOOL LOC3; 
NIM_BOOL LOC4; NI LOC6; Ttype294840* t0; Ropeobj180006* LOC10; NI id0; Ropeobj180006* LOC11; Ropeobj180006* LOC12; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = ((*d0).k == ((Tlockind294808) 0)); if (!(LOC4)) goto LA5; LOC6 = (NI)0; LOC6 = len_295081_850551059(n0); LOC4 = (((NI) (((*n0).kind == ((Tnodekind294020) 38)))) < LOC6); LA5: ; LOC3 = LOC4; if (!(LOC3)) goto LA7; LOC3 = isdeepconstexpr_320566_2616423590(n0); LA7: ; if (!LOC3) goto LA8; t0 = getuniquetype_530640_2036603609((*n0).typ); LOC10 = (Ropeobj180006*)0; LOC10 = gettypedesc_537671_839829468((*p0).module, t0); id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC11 = (Ropeobj180006*)0; LOC11 = rope_180401_2381377266(((NI64) (id0))); LOC12 = (Ropeobj180006*)0; LOC12 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC11); fillloc_534282_839829468(d0, ((Tlockind294808) 8), t0, LOC12, ((Tstorageloc294812) 1)); { TY537238 LOC17; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_537671_839829468((*p0).module, t0); LOC17[1] = (*d0).r; LOC17[2] = genconstexpr_556849_839829468(p0, n0); addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3); } LA15: ; result0 = NIM_TRUE; } goto LA1; LA8: ; { result0 = NIM_FALSE; } LA1: ; return result0; } N_NIMCALL(void, genarrayconstr_560207_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { Tloc294816 arr0; memset((void*)(&arr0), 0, sizeof(arr0)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_556853_839829468(p0, n0, d0); if (!!(LOC3)) goto LA4; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA8; gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA8: ; { NI i_560234_839829468; NI HEX3Atmp_560242_839829468; NI LOC11; NI res_560245_839829468; i_560234_839829468 = (NI)0; HEX3Atmp_560242_839829468 = (NI)0; LOC11 
= (NI)0; LOC11 = sonslen_297351_850551059(n0); HEX3Atmp_560242_839829468 = (NI)(LOC11 - ((NI) 1)); res_560245_839829468 = ((NI) 0); { while (1) { Ttype294840* LOC14; Ttype294840* LOC15; TY534811 LOC16; if (!(res_560245_839829468 <= HEX3Atmp_560242_839829468)) goto LA13; i_560234_839829468 = res_560245_839829468; LOC14 = (Ttype294840*)0; LOC14 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256)); LOC15 = (Ttype294840*)0; LOC15 = elemtype_322394_3876443242(LOC14); initloc_534273_839829468((&arr0), ((Tlockind294808) 6), LOC15, (*d0).s); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_540188_839829468((*d0)); LOC16[1] = intliteral_541270_839829468(((NI64) (i_560234_839829468))); arr0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2); expr_541248_839829468(p0, (*n0).kindU.S6.sons->data[i_560234_839829468], (&arr0)); res_560245_839829468 += ((NI) 1); } LA13: ; } } } LA4: ; } N_NIMCALL(void, gentupleconstr_559618_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { Tloc294816 rec0; memset((void*)(&rec0), 0, sizeof(rec0)); { NIM_BOOL LOC3; Ttype294840* t0; Ropeobj180006* LOC6; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_556853_839829468(p0, n0, d0); if (!!(LOC3)) goto LA4; t0 = getuniquetype_530640_2036603609((*n0).typ); LOC6 = (Ropeobj180006*)0; LOC6 = gettypedesc_537671_839829468((*p0).module, t0); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA9; gettemp_539032_839829468(p0, t0, d0, NIM_FALSE); } LA9: ; { NI i_559646_839829468; NI HEX3Atmp_559803_839829468; NI LOC12; NI res_559806_839829468; i_559646_839829468 = (NI)0; HEX3Atmp_559803_839829468 = (NI)0; LOC12 = (NI)0; LOC12 = sonslen_297351_850551059(n0); HEX3Atmp_559803_839829468 = (NI)(LOC12 - ((NI) 1)); res_559806_839829468 = ((NI) 0); { while (1) { Tnode294802* it0; TY534811 LOC19; if (!(res_559806_839829468 <= HEX3Atmp_559803_839829468)) goto LA14; i_559646_839829468 = res_559806_839829468; it0 = (*n0).kindU.S6.sons->data[i_559646_839829468]; { if 
(!((*it0).kind == ((Tnodekind294020) 34))) goto LA17; it0 = (*it0).kindU.S6.sons->data[((NI) 1)]; } LA17: ; initloc_534273_839829468((&rec0), ((Tlockind294808) 6), (*it0).typ, (*d0).s); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_540188_839829468((*d0)); LOC19[1] = rope_180401_2381377266(((NI64) (i_559646_839829468))); rec0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2); expr_541248_839829468(p0, it0, (&rec0)); res_559806_839829468 += ((NI) 1); } LA14: ; } } } LA4: ; } N_NIMCALL(Tsym294834*, lookupfieldagain_555153_839829468)(Tcproc531021* p0, Ttype294840* ty_555156_839829468, Tsym294834* field0, Ropeobj180006** r0) { Tsym294834* result0; Ttype294840* ty0; result0 = (Tsym294834*)0; ty0 = ty_555156_839829468; { while (1) { if (!!((ty0 == NIM_NIL))) goto LA2; ty0 = skiptypes_298099_850551059(ty0, IL64(211106247215360)); result0 = lookupinrecord_301119_2984716966((*ty0).n, (*field0).name); { if (!!((result0 == NIM_NIL))) goto LA5; goto LA1; } LA5: ; { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC9) goto LA10; LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA10: ; if (!!(LOC9)) goto LA11; add_180487_2381377266(r0, ((NimStringDesc*) &T839829468_153)); } LA11: ; ty0 = getuniquetype_530640_2036603609((*ty0).sons->data[((NI) 0)]); } LA2: ; } LA1: ; { if (!(result0 == NIM_NIL)) goto LA15; internalerror_198100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532)); } LA15: ; return result0; } N_NIMCALL(void, genfieldcheck_555504_839829468)(Tcproc531021* p0, Tnode294802* e0, Ropeobj180006* obj0, Tsym294834* field0, Ttype294840* origty0) { Tloc294816 test0; Tloc294816 u0; Tloc294816 v0; memset((void*)(&test0), 0, sizeof(test0)); memset((void*)(&u0), 0, sizeof(u0)); memset((void*)(&v0), 0, sizeof(v0)); { NI i_555525_839829468; NI HEX3Atmp_556039_839829468; NI LOC2; NI res_556042_839829468; i_555525_839829468 = (NI)0; 
HEX3Atmp_556039_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(e0); HEX3Atmp_556039_839829468 = (NI)(LOC2 - ((NI) 1)); res_556042_839829468 = ((NI) 1); { while (1) { Tnode294802* it0; Tsym294834* op0; Tnode294802* disc0; Ropeobj180006* o0; Tsym294834* d0; NI id0; Tnode294802* LOC9; Ropeobj180006* strlit0; if (!(res_556042_839829468 <= HEX3Atmp_556039_839829468)) goto LA4; i_555525_839829468 = res_556042_839829468; it0 = (*e0).kindU.S6.sons->data[i_555525_839829468]; op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { if (!((*op0).magic == ((Tmagic294524) 99))) goto LA7; it0 = (*it0).kindU.S6.sons->data[((NI) 1)]; } LA7: ; disc0 = skipconv_330882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]); initloc_534273_839829468((&test0), ((Tlockind294808) 0), (*it0).typ, ((Tstorageloc294812) 2)); initlocexpr_541283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0)); o0 = obj0; d0 = lookupfieldagain_555153_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0); initloc_534273_839829468((&v0), ((Tlockind294808) 6), (*d0).typ, ((Tstorageloc294812) 0)); v0.r = o0; add_180487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257)); add_180482_2381377266(&v0.r, (*d0).loc.r); geninexpraux_555496_839829468(p0, it0, (&u0), (&v0), (&test0)); LOC9 = (Tnode294802*)0; LOC9 = newstrnode_295678_850551059(((Tnodekind294020) 20), (*(*field0).name).s); id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels))); { if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12; strlit0 = getstrlit_551468_839829468((*p0).module, (*(*field0).name).s); } goto LA10; LA12: ; { Ropeobj180006* LOC15; LOC15 = (Ropeobj180006*)0; LOC15 = rope_180401_2381377266(((NI64) (id0))); strlit0 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC15); } LA10: ; { TY534811 LOC20; if (!((*op0).magic == ((Tmagic294524) 99))) goto LA18; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rdloc_540188_839829468(test0); LOC20[1] = strlit0; 
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2); } goto LA16; LA18: ; { TY534811 LOC22; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = rdloc_540188_839829468(test0); LOC22[1] = strlit0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2); } LA16: ; res_556042_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, genobjconstr_556903_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 tmp0; Ttype294840* t0; NIM_BOOL isref0; Ropeobj180006* r0; Ropeobj180006* LOC13; Ttype294840* ty0; { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_556853_839829468(p0, e0, d0); if (!LOC3) goto LA4; goto BeforeRet; } LA4: ; memset((void*)(&tmp0), 0, sizeof(tmp0)); t0 = skiptypes_298099_850551059((*e0).typ, IL64(211106232576256)); gettemp_539032_839829468(p0, t0, (&tmp0), NIM_FALSE); isref0 = ((*t0).kind == ((Ttypekind294244) 22)); r0 = rdloc_540188_839829468(tmp0); { Ttype294840* LOC10; TY180507 LOC11; if (!isref0) goto LA8; rawgennew_556741_839829468(p0, tmp0, NIM_NIL); LOC10 = (Ttype294840*)0; LOC10 = lastson_297377_850551059(t0); t0 = skiptypes_298099_850551059(LOC10, IL64(211106232576256)); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = r0; r0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1); gcusage_556439_839829468(e0); } goto LA6; LA8: ; { constructloc_540388_839829468(p0, tmp0, NIM_FALSE); } LA6: ; LOC13 = (Ropeobj180006*)0; LOC13 = gettypedesc_537671_839829468((*p0).module, t0); ty0 = getuniquetype_530640_2036603609(t0); { NI i_556944_839829468; NI HEX3Atmp_556997_839829468; NI LOC15; NI res_557000_839829468; i_556944_839829468 = (NI)0; HEX3Atmp_556997_839829468 = (NI)0; LOC15 = (NI)0; LOC15 = len_295081_850551059(e0); HEX3Atmp_556997_839829468 = (LOC15 - 1); res_557000_839829468 = ((NI) 1); { while (1) { Tnode294802* it0; Tloc294816 tmp20; Tsym294834* field0; if (!(res_557000_839829468 <= 
HEX3Atmp_556997_839829468)) goto LA17; i_556944_839829468 = res_557000_839829468; it0 = (*e0).kindU.S6.sons->data[i_556944_839829468]; memset((void*)(&tmp20), 0, sizeof(tmp20)); tmp20.r = r0; field0 = lookupfieldagain_555153_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r); { if (!((*field0).loc.r == NIM_NIL)) goto LA20; internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533)); } LA20: ; { NIM_BOOL LOC24; NI LOC25; LOC24 = (NIM_BOOL)0; LOC25 = (NI)0; LOC25 = len_295081_850551059(it0); LOC24 = (LOC25 == ((NI) 3)); if (!(LOC24)) goto LA26; LOC24 = (((*p0).options &(1U<<((NU)(((Toption171009) 2))&31U)))!=0); LA26: ; if (!LOC24) goto LA27; genfieldcheck_555504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0); } LA27: ; add_180487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257)); add_180482_2381377266(&tmp20.r, (*field0).loc.r); tmp20.k = ((Tlockind294808) 1); tmp20.t = (*field0).loc.t; { if (!isref0) goto LA31; tmp20.s = ((Tstorageloc294812) 3); } goto LA29; LA31: ; { tmp20.s = ((Tstorageloc294812) 2); } LA29: ; expr_541248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20)); res_557000_839829468 += ((NI) 1); } LA17: ; } } { if (!((*d0).k == ((Tlockind294808) 0))) goto LA36; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI294816)); } goto LA34; LA36: ; { genassignment_541264_839829468(p0, (*d0), tmp0, 0); } LA34: ; }BeforeRet: ; } N_NIMCALL(void, gencast_558537_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Ttype294840* destt0; Ttype294840* srct0; destt0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832)); srct0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832)); { NIM_BOOL LOC3; Ropeobj180006* lbl0; Tloc294816 tmp0; TY180507 LOC7; TY537238 LOC8; TY180507 LOC9; Ropeobj180006* LOC10; LOC3 = (NIM_BOOL)0; LOC3 = ((*destt0).kind >= ((Ttypekind294244) 36) && (*destt0).kind <= ((Ttypekind294244) 39) || 
(*destt0).kind == ((Ttypekind294244) 18) || (*destt0).kind == ((Ttypekind294244) 17) || (*destt0).kind == ((Ttypekind294244) 16) || (*destt0).kind == ((Ttypekind294244) 4)); if (LOC3) goto LA4; LOC3 = ((*srct0).kind >= ((Ttypekind294244) 36) && (*srct0).kind <= ((Ttypekind294244) 39) || (*srct0).kind == ((Ttypekind294244) 18) || (*srct0).kind == ((Ttypekind294244) 17) || (*srct0).kind == ((Ttypekind294244) 16) || (*srct0).kind == ((Ttypekind294244) 4)); LA4: ; if (!LOC3) goto LA5; (*p0).labels += ((NI) 1); lbl0 = rope_180401_2381377266(((NI64) ((*p0).labels))); memset((void*)(&tmp0), 0, sizeof(tmp0)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = lbl0; tmp0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = gettypedesc_537671_839829468((*p0).module, srct0); LOC8[1] = gettypedesc_537671_839829468((*p0).module, destt0); LOC8[2] = lbl0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3); tmp0.k = ((Tlockind294808) 6); tmp0.t = srct0; tmp0.s = ((Tstorageloc294812) 2); tmp0.flags = 0; expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = lbl0; LOC10 = (Ropeobj180006*)0; LOC10 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s); } goto LA1; LA5: ; { gensomecast_558480_839829468(p0, e0, d0); } LA1: ; } N_NIMCALL(void, genconv_558632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Ttype294840* desttype0; desttype0 = skiptypes_298099_850551059((*e0).typ, 8390656); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = comparetypes_328214_3876443242(desttype0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, ((Tdistinctcompare326427) 1), 0); if (!LOC3) goto LA4; expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0); } goto LA1; LA4: ; { gensomecast_558480_839829468(p0, e0, d0); } LA1: ; } 
static N_INLINE(NIM_BOOL, iscppref_554807_839829468)(Tcproc531021* p0, Ttype294840* typ0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; NIM_BOOL LOC3; Ttype294840* LOC6; Ttype294840* LOC8; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA4: ; LOC2 = LOC3; if (!(LOC2)) goto LA5; LOC6 = (Ttype294840*)0; LOC6 = skiptypes_298099_850551059(typ0, IL64(211106232576256)); LOC2 = ((*LOC6).kind == ((Ttypekind294244) 23)); LA5: ; LOC1 = LOC2; if (!(LOC1)) goto LA7; LOC8 = (Ttype294840*)0; LOC8 = skiptypes_298099_850551059(typ0, IL64(211106232576256)); LOC1 = !((((*LOC8).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0)); LA7: ; result0 = LOC1; return result0; } N_NIMCALL(void, genaddr_555051_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { { Ttype294840* LOC3; Tloc294816 a0; Ropeobj180006* LOC6; LOC3 = (Ttype294840*)0; LOC3 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); if (!((*LOC3).kind == ((Ttypekind294244) 22) || (*LOC3).kind == ((Ttypekind294244) 21))) goto LA4; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC6 = (Ropeobj180006*)0; LOC6 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_52), a0.r); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC6, a0.s); } goto LA1; LA4: ; { NIM_BOOL LOC8; Tctypekind531007 LOC9; LOC8 = (NIM_BOOL)0; LOC9 = (Tctypekind531007)0; LOC9 = maptype_535393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); LOC8 = (LOC9 == ((Tctypekind531007) 17)); if (LOC8) goto LA10; LOC8 = iscppref_554807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); LA10: ; if (!LOC8) goto LA11; expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); } goto LA1; LA11: ; { Tloc294816 a0; Ropeobj180006* 
LOC14; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC14 = (Ropeobj180006*)0; LOC14 = addrloc_540204_839829468(a0); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC14, a0.s); } LA1: ; } N_NIMCALL(void, genarrayelem_556093_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; Ttype294840* ty0; Ttype294840* LOC1; Ropeobj180006* first0; NI64 LOC2; Ttype294840* LOC47; Ttype294840* LOC48; TY537238 LOC49; Ropeobj180006* LOC50; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, x0, (&a0)); initlocexpr_541283_839829468(p0, y0, (&b0)); LOC1 = (Ttype294840*)0; LOC1 = skiptypes_298099_850551059(a0.t, IL64(211106242013440)); ty0 = skiptypes_298099_850551059(LOC1, IL64(211106247256320)); LOC2 = (NI64)0; LOC2 = firstord_322001_3876443242(ty0); first0 = intliteral_541270_839829468(LOC2); { NIM_BOOL LOC5; LOC5 = (NIM_BOOL)0; LOC5 = (((*p0).options &(1U<<((NU)(((Toption171009) 4))&31U)))!=0); if (!(LOC5)) goto LA6; LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0)); LA6: ; if (!LOC5) goto LA7; { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = isconstexpr_320510_2616423590(y0); if (!!(LOC11)) goto LA12; { NI64 LOC16; LOC16 = (NI64)0; LOC16 = firstord_322001_3876443242(ty0); if (!(LOC16 == IL64(0))) goto LA17; { NIM_BOOL LOC21; NI64 LOC22; NI64 LOC23; NI64 LOC25; NI64 LOC26; TY534811 LOC29; NI64 LOC30; LOC21 = (NIM_BOOL)0; LOC22 = (NI64)0; LOC22 = firstord_322001_3876443242(b0.t); LOC23 = (NI64)0; LOC23 = firstord_322001_3876443242(ty0); LOC21 = (LOC22 < LOC23); if (LOC21) goto LA24; LOC25 = (NI64)0; LOC25 = lastord_322004_3876443242(ty0); LOC26 = (NI64)0; LOC26 = lastord_322004_3876443242(b0.t); LOC21 = (LOC25 < LOC26); LA24: ; if (!LOC21) goto LA27; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdcharloc_540227_839829468(b0); LOC30 = (NI64)0; LOC30 = 
lastord_322004_3876443242(ty0); LOC29[1] = intliteral_541270_839829468(LOC30); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2); } LA27: ; } goto LA14; LA17: ; { TY537238 LOC32; NI64 LOC33; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = rdcharloc_540227_839829468(b0); LOC32[1] = first0; LOC33 = (NI64)0; LOC33 = lastord_322004_3876443242(ty0); LOC32[2] = intliteral_541270_839829468(LOC33); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3); } LA14: ; } goto LA9; LA12: ; { NI64 idx0; idx0 = getordvalue_322129_3876443242(y0); { NIM_BOOL LOC37; NI64 LOC38; NI64 LOC40; LOC37 = (NIM_BOOL)0; LOC38 = (NI64)0; LOC38 = firstord_322001_3876443242(ty0); LOC37 = (idx0 < LOC38); if (LOC37) goto LA39; LOC40 = (NI64)0; LOC40 = lastord_322004_3876443242(ty0); LOC37 = (LOC40 < idx0); LA39: ; if (!LOC37) goto LA41; localerror_198080_155036129((*x0).info, ((Tmsgkind193002) 86), ((NimStringDesc*) &T839829468_490)); } LA41: ; } LA9: ; } LA7: ; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA45; (*d0).s = a0.s; } LA45: ; LOC47 = (Ttype294840*)0; LOC47 = skiptypes_298099_850551059(ty0, IL64(211106240964864)); LOC48 = (Ttype294840*)0; LOC48 = elemtype_322394_3876443242(LOC47); memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_540188_839829468(a0); LOC49[1] = rdcharloc_540227_839829468(b0); LOC49[2] = first0; LOC50 = (Ropeobj180006*)0; LOC50 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3); putintodest_552468_839829468(p0, d0, LOC48, LOC50, a0.s); } N_NIMCALL(void, genopenarrayelem_556169_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; Ttype294840* LOC10; Ttype294840* LOC11; TY534811 LOC12; Ropeobj180006* LOC13; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, x0, (&a0)); initlocexpr_541283_839829468(p0, y0, (&b0)); { TY534811 
LOC5; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 4))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_540188_839829468(b0); LOC5[1] = rdloc_540188_839829468(a0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2); } LA3: ; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA8; (*d0).s = a0.s; } LA8: ; LOC10 = (Ttype294840*)0; LOC10 = skiptypes_298099_850551059(a0.t, IL64(211106240964864)); LOC11 = (Ttype294840*)0; LOC11 = elemtype_322394_3876443242(LOC10); memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_540188_839829468(a0); LOC12[1] = rdcharloc_540227_839829468(b0); LOC13 = (Ropeobj180006*)0; LOC13 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2); putintodest_552468_839829468(p0, d0, LOC11, LOC13, a0.s); } N_NIMCALL(void, genseqelem_556205_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; Ttype294840* ty0; Ttype294840* LOC27; Ttype294840* LOC28; TY534811 LOC29; Ropeobj180006* LOC30; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, x0, (&a0)); initlocexpr_541283_839829468(p0, y0, (&b0)); ty0 = skiptypes_298099_850551059(a0.t, IL64(211106242013440)); { Ttype294840* LOC5; if (!((*ty0).kind == ((Ttypekind294244) 22) || (*ty0).kind == ((Ttypekind294244) 21))) goto LA3; LOC5 = (Ttype294840*)0; LOC5 = lastson_297377_850551059(ty0); ty0 = skiptypes_298099_850551059(LOC5, IL64(211106242013440)); } LA3: ; { if (!(((*p0).options &(1U<<((NU)(((Toption171009) 4))&31U)))!=0)) goto LA8; { TY537238 LOC14; if (!((*ty0).kind == ((Ttypekind294244) 28))) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_540188_839829468(b0); LOC14[1] = rdloc_540188_839829468(a0); LOC14[2] = lenfield_541305_839829468(p0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3); } goto LA10; LA12: ; { 
TY537238 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_540188_839829468(b0); LOC16[1] = rdloc_540188_839829468(a0); LOC16[2] = lenfield_541305_839829468(p0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3); } LA10: ; } LA8: ; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA19; (*d0).s = ((Tstorageloc294812) 3); } LA19: ; { Ttype294840* LOC23; TY180507 LOC26; LOC23 = (Ttype294840*)0; LOC23 = skiptypes_298099_850551059(a0.t, IL64(211106240964864)); if (!((*LOC23).kind == ((Ttypekind294244) 22) || (*LOC23).kind == ((Ttypekind294244) 21))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = a0.r; a0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1); } LA24: ; LOC27 = (Ttype294840*)0; LOC27 = skiptypes_298099_850551059(a0.t, IL64(211106240964864)); LOC28 = (Ttype294840*)0; LOC28 = elemtype_322394_3876443242(LOC27); memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdloc_540188_839829468(a0); LOC29[1] = rdcharloc_540227_839829468(b0); LOC30 = (Ropeobj180006*)0; LOC30 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2); putintodest_552468_839829468(p0, d0, LOC28, LOC30, a0.s); } N_NIMCALL(void, gencstringelem_556144_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0) { Tloc294816 a0; Tloc294816 b0; Ttype294840* ty0; Ttype294840* LOC5; Ttype294840* LOC6; TY534811 LOC7; Ropeobj180006* LOC8; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, x0, (&a0)); initlocexpr_541283_839829468(p0, y0, (&b0)); ty0 = skiptypes_298099_850551059(a0.t, IL64(211106242013440)); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA3; (*d0).s = a0.s; } LA3: ; LOC5 = (Ttype294840*)0; LOC5 = skiptypes_298099_850551059(ty0, IL64(211106240964864)); LOC6 = (Ttype294840*)0; LOC6 = elemtype_322394_3876443242(LOC5); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = 
rdloc_540188_839829468(a0); LOC7[1] = rdcharloc_540227_839829468(b0); LOC8 = (Ropeobj180006*)0; LOC8 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2); putintodest_552468_839829468(p0, d0, LOC6, LOC8, a0.s); } N_NIMCALL(void, gentupleelem_555124_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; NI i0; Ropeobj180006* LOC5; Ttype294840* ty0; Ropeobj180006* r0; TY180507 LOC8; memset((void*)(&a0), 0, sizeof(a0)); i0 = (NI)0; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); { if (!((*d0).k == ((Tlockind294808) 0))) goto LA3; (*d0).s = a0.s; } LA3: ; LOC5 = (Ropeobj180006*)0; LOC5 = gettypedesc_537671_839829468((*p0).module, a0.t); ty0 = getuniquetype_530640_2036603609(a0.t); r0 = rdloc_540188_839829468(a0); switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) { case ((Tnodekind294020) 6) ... ((Tnodekind294020) 15): { i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval)); } break; default: { internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545)); } break; } memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rope_180401_2381377266(((NI64) (i0))); addf_181205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1); putintodest_552468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s); } N_NIMCALL(void, genbracketexpr_556277_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { Ttype294840* ty0; ty0 = skiptypes_298099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440)); { Ttype294840* LOC5; if (!((*ty0).kind == ((Ttypekind294244) 22) || (*ty0).kind == ((Ttypekind294244) 21))) goto LA3; LOC5 = (Ttype294840*)0; LOC5 = lastson_297377_850551059(ty0); ty0 = skiptypes_298099_850551059(LOC5, IL64(211106242013440)); } LA3: ; switch ((*ty0).kind) { case ((Ttypekind294244) 16): case ((Ttypekind294244) 4): { genarrayelem_556093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } 
break; case ((Ttypekind294244) 27): case ((Ttypekind294244) 48): { genopenarrayelem_556169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind294244) 24): case ((Ttypekind294244) 28): { genseqelem_556205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind294244) 29): { gencstringelem_556144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind294244) 18): { gentupleelem_555124_839829468(p0, n0, d0); } break; default: { NimStringDesc* LOC12; LOC12 = (NimStringDesc*)0; LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI294244))->Sup.len + 21); appendString(LOC12, ((NimStringDesc*) &T839829468_547)); appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI294244))); appendChar(LOC12, 41); internalerror_198100_155036129((*n0).info, LOC12); } break; } } N_NIMCALL(void, genderef_545921_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NIM_BOOL enforcederef0) { Tctypekind531007 mt0; { mt0 = maptype_535393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0); if (!(LOC3)) goto LA4; LOC3 = !(enforcederef0); LA4: ; if (!LOC3) goto LA5; expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); { Ttype294840* LOC9; LOC9 = (Ttype294840*)0; LOC9 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); if (!((*LOC9).kind == ((Ttypekind294244) 22))) goto LA10; (*d0).s = ((Tstorageloc294812) 3); } LA10: ; } goto LA1; LA5: ; { Tloc294816 a0; Ttype294840* typ0; memset((void*)(&a0), 0, sizeof(a0)); typ0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { NIM_BOOL LOC15; NIM_BOOL LOC16; NIM_BOOL LOC17; NIM_BOOL LOC20; Tnode294802* LOC25; Tnode294802* LOC26; LOC15 = (NIM_BOOL)0; LOC16 = (NIM_BOOL)0; LOC17 = 
(NIM_BOOL)0; LOC17 = ((*typ0).kind == ((Ttypekind294244) 23)); if (!(LOC17)) goto LA18; LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0)); LA18: ; LOC16 = LOC17; if (!(LOC16)) goto LA19; LOC20 = (NIM_BOOL)0; LOC20 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC20) goto LA21; LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA21: ; LOC16 = LOC20; LA19: ; LOC15 = LOC16; if (!(LOC15)) goto LA22; LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 64)); LA22: ; if (!LOC15) goto LA23; LOC25 = (Tnode294802*)0; LOC25 = HEX5BHEX5D_295238_850551059(e0, ((NI) 0)); LOC26 = (Tnode294802*)0; LOC26 = HEX5BHEX5D_295238_850551059(LOC25, ((NI) 0)); initlocexprsingleuse_541289_839829468(p0, LOC26, d0); goto BeforeRet; } goto LA13; LA23: ; { initlocexprsingleuse_541289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA13: ; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA30; switch ((*typ0).kind) { case ((Ttypekind294244) 22): { (*d0).s = ((Tstorageloc294812) 3); } break; case ((Ttypekind294244) 23): { (*d0).s = ((Tstorageloc294812) 0); { NIM_BOOL LOC36; NIM_BOOL LOC37; NIM_BOOL LOC39; Ropeobj180006* LOC44; LOC36 = (NIM_BOOL)0; LOC37 = (NIM_BOOL)0; LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0)); if (!(LOC37)) goto LA38; LOC39 = (NIM_BOOL)0; LOC39 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC39) goto LA40; LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA40: ; LOC37 = LOC39; LA38: ; LOC36 = LOC37; if (!(LOC36)) goto LA41; LOC36 = ((*e0).kind == ((Tnodekind294020) 65)); LA41: ; if (!LOC36) goto LA42; LOC44 = (Ropeobj180006*)0; LOC44 = rdloc_540188_839829468(a0); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC44, a0.s); goto BeforeRet; } LA42: ; } break; case ((Ttypekind294244) 21): { (*d0).s = ((Tstorageloc294812) 0); } break; default: { NimStringDesc* LOC47; LOC47 = (NimStringDesc*)0; 
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI294244))->Sup.len + 9); appendString(LOC47, ((NimStringDesc*) &T839829468_548)); appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI294244))); internalerror_198100_155036129((*e0).info, LOC47); } break; } } goto LA28; LA30: ; { NIM_BOOL LOC49; LOC49 = (NIM_BOOL)0; LOC49 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC49) goto LA50; LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA50: ; if (!LOC49) goto LA51; { NIM_BOOL LOC55; NIM_BOOL LOC56; Ropeobj180006* LOC61; LOC55 = (NIM_BOOL)0; LOC56 = (NIM_BOOL)0; LOC56 = ((*typ0).kind == ((Ttypekind294244) 23)); if (!(LOC56)) goto LA57; LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0)); LA57: ; LOC55 = LOC56; if (!(LOC55)) goto LA58; LOC55 = ((*e0).kind == ((Tnodekind294020) 65)); LA58: ; if (!LOC55) goto LA59; LOC61 = (Ropeobj180006*)0; LOC61 = rdloc_540188_839829468(a0); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC61, a0.s); goto BeforeRet; } LA59: ; } goto LA28; LA51: ; LA28: ; { NIM_BOOL LOC64; Ropeobj180006* LOC68; LOC64 = (NIM_BOOL)0; LOC64 = enforcederef0; if (!(LOC64)) goto LA65; LOC64 = (mt0 == ((Tctypekind531007) 18)); LA65: ; if (!LOC64) goto LA66; LOC68 = (Ropeobj180006*)0; LOC68 = rdloc_540188_839829468(a0); putintodest_552468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s); } goto LA62; LA66: ; { TY180507 LOC70; Ropeobj180006* LOC71; memset((void*)LOC70, 0, sizeof(LOC70)); LOC70[0] = rdloc_540188_839829468(a0); LOC71 = (Ropeobj180006*)0; LOC71 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1); putintodest_552468_839829468(p0, d0, (*e0).typ, LOC71, a0.s); } LA62: ; } LA1: ; }BeforeRet: ; } N_NIMCALL(Ttype294840*, genrecordfieldaux_555096_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tloc294816* a0) { Ttype294840* result0; Ropeobj180006* LOC9; result0 = (Ttype294840*)0; initlocexpr_541283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 0)], a0); { if (!!(((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind294020) 3)))) goto LA3; internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549)); } LA3: ; { if (!((*d0).k == ((Tlockind294808) 0))) goto LA7; (*d0).s = (*a0).s; } LA7: ; LOC9 = (Ropeobj180006*)0; LOC9 = gettypedesc_537671_839829468((*p0).module, (*a0).t); result0 = getuniquetype_530640_2036603609((*a0).t); return result0; } N_NIMCALL(void, genrecordfield_555448_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { Tloc294816 a0; Ttype294840* ty0; Ropeobj180006* r0; Tsym294834* f0; memset((void*)(&a0), 0, sizeof(a0)); ty0 = genrecordfieldaux_555096_839829468(p0, e0, d0, (&a0)); r0 = rdloc_540188_839829468(a0); f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; { TY180507 LOC5; if (!((*ty0).kind == ((Ttypekind294244) 18))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_180401_2381377266(((NI64) ((*f0).position))); addf_181205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1); putintodest_552468_839829468(p0, d0, (*f0).typ, r0, a0.s); } goto LA1; LA3: ; { Tsym294834* field0; TY180507 LOC11; field0 = lookupfieldagain_555153_839829468(p0, ty0, f0, &r0); { if (!((*field0).loc.r == NIM_NIL)) goto LA9; internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550)); } LA9: ; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = (*field0).loc.r; addf_181205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1); putintodest_552468_839829468(p0, d0, (*field0).typ, r0, a0.s); } LA1: ; } N_NIMCALL(void, gencheckedrecordfield_556046_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) { { Tloc294816 a0; Ttype294840* ty0; Ropeobj180006* r0; Tsym294834* f0; Tsym294834* field0; TY180507 LOC9; Ropeobj180006* LOC10; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 2))&31U)))!=0)) goto LA3; memset((void*)(&a0), 0, sizeof(a0)); ty0 = genrecordfieldaux_555096_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0)); r0 = rdloc_540188_839829468(a0); f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; field0 = lookupfieldagain_555153_839829468(p0, ty0, f0, &r0); { if (!((*field0).loc.r == NIM_NIL)) goto LA7; internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532)); } LA7: ; genfieldcheck_555504_839829468(p0, e0, r0, field0, ty0); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = (*field0).loc.r; LOC10 = (Ropeobj180006*)0; LOC10 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1); add_180482_2381377266(&r0, LOC10); putintodest_552468_839829468(p0, d0, (*field0).typ, r0, a0.s); } goto LA1; LA3: ; { genrecordfield_555448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); } LA1: ; } N_NIMCALL(NI, startblock_545978_839829468)(Tcproc531021* p0, NimStringDesc* start0, Ropeobj180006** args0, NI args0Len0) { NI result0; result0 = (NI)0; linecg_534707_839829468(p0, ((Tcprocsection531011) 2), start0, args0, args0Len0); (*p0).labels += ((NI) 1); result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0); (*p0).blocks = (TY531095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock531019), ((NI) ((NI)(result0 + ((NI) 1))))); (*p0).blocks->data[result0].id = ((NI) ((*p0).labels)); (*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? 
(*p0).nestedtrystmts->Sup.len : 0))); (*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock)); return result0; } N_NIMCALL(Ropeobj180006*, blockbody_546025_839829468)(Tblock531019* b0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = (*b0).sections[(((Tcprocsection531011) 0))- 0]; { TY180507 LOC5; if (!(((NI16) 0) < (*b0).framelen)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_180401_2381377266(((NI64) ((*b0).framelen))); addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1); } LA3: ; add_180482_2381377266(&result0, (*b0).sections[(((Tcprocsection531011) 1))- 0]); add_180482_2381377266(&result0, (*b0).sections[(((Tcprocsection531011) 2))- 0]); return result0; } N_NIMCALL(void, endblock_546035_839829468)(Tcproc531021* p0, Ropeobj180006* blockend0) { NI topblock0; Ropeobj180006* LOC1; topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1)); LOC1 = (Ropeobj180006*)0; LOC1 = blockbody_546025_839829468((&(*p0).blocks->data[topblock0])); add_180482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection531011) 2))- 0], LOC1); (*p0).blocks = (TY531095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock531019), ((NI) (topblock0))); line_534690_839829468(p0, ((Tcprocsection531011) 2), blockend0); } N_NIMCALL(void, endblock_546060_839829468)(Tcproc531021* p0) { NI topblock0; Ropeobj180006* blockend0; NI16 framelen0; topblock0 = (NI)(((*p0).blocks ? 
(*p0).blocks->Sup.len : 0) - ((NI) 1)); { TY180507 LOC5; if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = (*p0).blocks->data[topblock0].label; blockend0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1); } goto LA1; LA3: ; { TY535289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); blockend0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0); } LA1: ; framelen0 = (*p0).blocks->data[topblock0].framelen; { TY180507 LOC12; if (!(((NI16) 0) < framelen0)) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_180401_2381377266(((NI64) (framelen0))); addf_181205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1); } LA10: ; endblock_546035_839829468(p0, blockend0); } N_NIMCALL(void, genblock_548083_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { NI oldbreakidx_548099_839829468; TY535289 LOC8; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_299440_850551059((*n0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind294808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA6: ; oldbreakidx_548099_839829468 = (*p0).breakidx; memset((void*)LOC8, 0, sizeof(LOC8)); (*p0).breakidx = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0); { Tsym294834* sym0; if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA11; sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; (*sym0).loc.k = ((Tlockind294808) 10); (*sym0).position = (NI)((*p0).breakidx + ((NI) 1)); } LA11: ; expr_541248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0); endblock_546060_839829468(p0); (*p0).breakidx = oldbreakidx_548099_839829468; } N_NIMCALL(void, genstmtlistexpr_560402_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { NI length0; length0 = 
sonslen_297351_850551059(n0); { NI i_560420_839829468; NI HEX3Atmp_560424_839829468; NI res_560427_839829468; i_560420_839829468 = (NI)0; HEX3Atmp_560424_839829468 = (NI)0; HEX3Atmp_560424_839829468 = (NI)(length0 - ((NI) 2)); res_560427_839829468 = ((NI) 0); { while (1) { if (!(res_560427_839829468 <= HEX3Atmp_560424_839829468)) goto LA3; i_560420_839829468 = res_560427_839829468; genstmts_541244_839829468(p0, (*n0).kindU.S6.sons->data[i_560420_839829468]); res_560427_839829468 += ((NI) 1); } LA3: ; } } { if (!(((NI) 0) < length0)) goto LA6; expr_541248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0); } LA6: ; } N_NIMCALL(void, genif_546982_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { Tloc294816 a0; Ropeobj180006* lelse0; Ropeobj180006* lend0; memset((void*)(&a0), 0, sizeof(a0)); lelse0 = (Ropeobj180006*)0; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_299440_850551059((*n0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind294808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA6: ; genlinedir_534823_839829468(p0, n0); lend0 = getlabel_541217_839829468(p0); { NI i_547011_839829468; NI HEX3Atmp_547435_839829468; NI LOC9; NI res_547438_839829468; i_547011_839829468 = (NI)0; HEX3Atmp_547435_839829468 = (NI)0; LOC9 = (NI)0; LOC9 = sonslen_297351_850551059(n0); HEX3Atmp_547435_839829468 = (NI)(LOC9 - ((NI) 1)); res_547438_839829468 = ((NI) 0); { while (1) { Tnode294802* it0; if (!(res_547438_839829468 <= HEX3Atmp_547435_839829468)) goto LA11; i_547011_839829468 = res_547438_839829468; { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = ((*d0).k == ((Tlockind294808) 1)); if (!(LOC14)) goto LA15; LOC14 = isemptytype_299440_850551059((*n0).typ); LA15: ; if (!LOC14) goto LA16; (*d0).k = ((Tlockind294808) 0); } LA16: ; it0 = (*n0).kindU.S6.sons->data[i_547011_839829468]; { NI LOC20; TY535289 LOC23; NI LOC24; TY534811 LOC25; 
LOC20 = (NI)0; LOC20 = len_295081_850551059(it0); if (!(LOC20 == ((NI) 2))) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); LOC24 = (NI)0; LOC24 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0); initlocexprsingleuse_541289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0)); lelse0 = getlabel_541217_839829468(p0); (*p0).labels += ((NI) 1); memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = rdloc_540188_839829468(a0); LOC25[1] = lelse0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2); { NIM_BOOL LOC28; Ropeobj180006** LOC32; Ropeobj180006** LOC33; LOC28 = (NIM_BOOL)0; LOC28 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC28) goto LA29; LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA29: ; if (!LOC28) goto LA30; LOC32 = (Ropeobj180006**)0; LOC32 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223)); expr_541248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0); LOC33 = (Ropeobj180006**)0; LOC33 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280)); } goto LA26; LA30: ; { expr_541248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0); } LA26: ; endblock_546060_839829468(p0); { NI LOC37; TY180507 LOC40; LOC37 = (NI)0; LOC37 = sonslen_297351_850551059(n0); if (!(((NI) 1) < LOC37)) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = lend0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1); } LA38: ; fixlabel_541230_839829468(p0, lelse0); } goto LA18; LA21: ; { NI LOC42; TY535289 LOC45; NI LOC46; LOC42 = (NI)0; LOC42 = len_295081_850551059(it0); if (!(LOC42 == ((NI) 1))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (NI)0; LOC46 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0); 
expr_541248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0); endblock_546060_839829468(p0); } goto LA18; LA43: ; { internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557)); } LA18: ; res_547438_839829468 += ((NI) 1); } LA11: ; } } { NI LOC50; LOC50 = (NI)0; LOC50 = sonslen_297351_850551059(n0); if (!(((NI) 1) < LOC50)) goto LA51; fixlabel_541230_839829468(p0, lend0); } LA51: ; } N_NIMCALL(void, downconv_560581_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; expr_541248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0); } goto LA1; LA5: ; { Ttype294840* dest0; Tnode294802* arg0; Ttype294840* src0; Tloc294816 a0; Ropeobj180006* r0; NIM_BOOL isref0; Ttype294840* LOC10; dest0 = skiptypes_298099_850551059((*n0).typ, IL64(211106247256320)); arg0 = (*n0).kindU.S6.sons->data[((NI) 0)]; { while (1) { if (!((*arg0).kind == ((Tnodekind294020) 66))) goto LA9; arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)]; } LA9: ; } src0 = skiptypes_298099_850551059((*arg0).typ, IL64(211106247256320)); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, arg0, (&a0)); r0 = rdloc_540188_839829468(a0); LOC10 = (Ttype294840*)0; LOC10 = skiptypes_298099_850551059((*arg0).typ, IL64(211106232576256)); isref0 = ((*LOC10).kind == ((Ttypekind294244) 22) || (*LOC10).kind == ((Ttypekind294244) 21) || (*LOC10).kind == ((Ttypekind294244) 23)); { if (!isref0) goto LA13; add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_558)); } goto LA11; LA13: ; { add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); } LA11: ; { NI i_560650_839829468; NI HEX3Atmp_560677_839829468; NI LOC17; NI res_560680_839829468; i_560650_839829468 = (NI)0; HEX3Atmp_560677_839829468 = (NI)0; LOC17 = (NI)0; LOC17 = 
inheritancediff_328252_3876443242(dest0, src0); HEX3Atmp_560677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17)); res_560680_839829468 = ((NI) 2); { while (1) { if (!(res_560680_839829468 <= HEX3Atmp_560677_839829468)) goto LA19; i_560650_839829468 = res_560680_839829468; add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); res_560680_839829468 += ((NI) 1); } LA19: ; } } { if (!isref0) goto LA22; { NIM_BOOL LOC26; Ttype294840* LOC28; TY534811 LOC31; LOC26 = (NIM_BOOL)0; LOC26 = ((*d0).k == ((Tlockind294808) 0)); if (!(LOC26)) goto LA27; LOC28 = (Ttype294840*)0; LOC28 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256)); LOC26 = ((*LOC28).kind == ((Ttypekind294244) 22) || (*LOC28).kind == ((Ttypekind294244) 21) || (*LOC28).kind == ((Ttypekind294244) 23)); LA27: ; if (!LOC26) goto LA29; gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = rdloc_540188_839829468((*d0)); LOC31[1] = r0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2); } goto LA24; LA29: ; { r0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_52), r0); putintodest_552468_839829468(p0, d0, (*n0).typ, r0, a0.s); } LA24: ; } goto LA20; LA22: ; { putintodest_552468_839829468(p0, d0, (*n0).typ, r0, a0.s); } LA20: ; } LA1: ; } N_NIMCALL(void, upconv_560431_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { Tloc294816 a0; Ttype294840* dest0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); dest0 = skiptypes_298099_850551059((*n0).typ, IL64(211106247256320)); { NIM_BOOL LOC3; NIM_BOOL LOC5; Ropeobj180006* r0; Ropeobj180006* nilcheck0; Ttype294840* t0; LOC3 = (NIM_BOOL)0; LOC3 = (((*p0).options &(1U<<((NU)(((Toption171009) 1))&31U)))!=0); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = isobjlackingtypefield_535513_839829468(dest0); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; r0 = 
rdloc_540188_839829468(a0); nilcheck0 = NIM_NIL; t0 = skiptypes_298099_850551059(a0.t, IL64(211106232576256)); { while (1) { Ttype294840* LOC23; if (!((*t0).kind == ((Ttypekind294244) 23) || (*t0).kind == ((Ttypekind294244) 21) || (*t0).kind == ((Ttypekind294244) 22))) goto LA9; { if (!!(((*t0).kind == ((Ttypekind294244) 23)))) goto LA12; nilcheck0 = r0; } LA12: ; { NIM_BOOL LOC16; NIM_BOOL LOC18; TY180507 LOC22; LOC16 = (NIM_BOOL)0; LOC16 = !(((*t0).kind == ((Ttypekind294244) 23))); if (LOC16) goto LA17; LOC18 = (NIM_BOOL)0; LOC18 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC18) goto LA19; LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA19: ; LOC16 = !(LOC18); LA17: ; if (!LOC16) goto LA20; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = r0; r0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1); } LA20: ; LOC23 = (Ttype294840*)0; LOC23 = lastson_297377_850551059(t0); t0 = skiptypes_298099_850551059(LOC23, IL64(211106232576256)); } LA9: ; } { NIM_BOOL LOC26; LOC26 = (NIM_BOOL)0; LOC26 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC26) goto LA27; LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA27: ; if (!!(LOC26)) goto LA28; { while (1) { NIM_BOOL LOC32; LOC32 = (NIM_BOOL)0; LOC32 = ((*t0).kind == ((Ttypekind294244) 17)); if (!(LOC32)) goto LA33; LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); LA33: ; if (!LOC32) goto LA31; add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); t0 = skiptypes_298099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360)); } LA31: ; } } LA28: ; { TY537238 LOC38; if (!!((nilcheck0 == NIM_NIL))) goto LA36; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = nilcheck0; LOC38[1] = r0; LOC38[2] = gentypeinfo_537941_839829468((*p0).module, dest0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3); } goto LA34; LA36: ; { TY534811 LOC40; 
memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = r0; LOC40[1] = gentypeinfo_537941_839829468((*p0).module, dest0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2); } LA34: ; } LA6: ; { TY534811 LOC45; Ropeobj180006* LOC46; if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind294244) 17)))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = gettypedesc_537671_839829468((*p0).module, (*n0).typ); LOC45[1] = rdloc_540188_839829468(a0); LOC46 = (Ropeobj180006*)0; LOC46 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2); putintodest_552468_839829468(p0, d0, (*n0).typ, LOC46, a0.s); } goto LA41; LA43: ; { TY534811 LOC48; Ropeobj180006* LOC49; memset((void*)LOC48, 0, sizeof(LOC48)); LOC48[0] = gettypedesc_537671_839829468((*p0).module, dest0); LOC48[1] = addrloc_540204_839829468(a0); LOC49 = (Ropeobj180006*)0; LOC49 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2); putintodest_552468_839829468(p0, d0, (*n0).typ, LOC49, a0.s); } LA41: ; } N_NIMCALL(void, genrangechck_558590_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0, NimStringDesc* magic0) { Tloc294816 a0; Ttype294840* dest0; memset((void*)(&a0), 0, sizeof(a0)); dest0 = skiptypes_298099_850551059((*n0).typ, IL64(211106240964864)); { NIM_BOOL LOC3; Ttype294840* LOC5; TY534811 LOC8; Ropeobj180006* LOC9; LOC3 = (NIM_BOOL)0; LOC3 = !((((*p0).options &(1U<<((NU)(((Toption171009) 3))&31U)))!=0)); if (LOC3) goto LA4; LOC5 = (Ttype294840*)0; LOC5 = skiptypes_298099_850551059(dest0, 1048576); LOC3 = ((*LOC5).kind >= ((Ttypekind294244) 40) && (*LOC5).kind <= ((Ttypekind294244) 44)); LA4: ; if (!LOC3) goto LA6; initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = gettypedesc_537671_839829468((*p0).module, dest0); LOC8[1] = rdcharloc_540227_839829468(a0); LOC9 = (Ropeobj180006*)0; LOC9 = 
HEX25_180905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2); putintodest_552468_839829468(p0, d0, (*n0).typ, LOC9, a0.s); } goto LA1; LA6: ; { TY538475 LOC11; Ropeobj180006* LOC12; initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_537671_839829468((*p0).module, dest0); LOC11[1] = rdcharloc_540227_839829468(a0); LOC11[2] = genliteral_551476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0); LOC11[3] = genliteral_551476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0); LOC11[4] = rope_180277_2381377266(magic0); LOC12 = (Ropeobj180006*)0; LOC12 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5); putintodest_552468_839829468(p0, d0, dest0, LOC12, a0.s); } LA1: ; } N_NIMCALL(void, convstrtocstr_558642_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { Tloc294816 a0; Ttype294840* LOC1; TY180507 LOC2; Ropeobj180006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (Ttype294840*)0; LOC1 = skiptypes_298099_850551059((*n0).typ, IL64(211106240964864)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_540188_839829468(a0); LOC3 = (Ropeobj180006*)0; LOC3 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_485), LOC2, 1); putintodest_552468_839829468(p0, d0, LOC1, LOC3, a0.s); } N_NIMCALL(void, convcstrtostr_558654_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { Tloc294816 a0; Ttype294840* LOC1; TY180507 LOC2; Ropeobj180006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (Ttype294840*)0; LOC1 = skiptypes_298099_850551059((*n0).typ, IL64(211106240964864)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_540188_839829468(a0); LOC3 = (Ropeobj180006*)0; LOC3 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), 
LOC2, 1); putintodest_552468_839829468(p0, d0, LOC1, LOC3, a0.s); gcusage_556439_839829468(n0); } static N_INLINE(NIM_BOOL, isroutine_299323_850551059)(Tsym294834* s0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((258048 &(1U<<((NU)((*s0).kind)&31U)))!=0); return result0; } static N_INLINE(NIM_BOOL, isconstclosure_559810_839829468)(Tnode294802* n0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)); if (!(LOC2)) goto LA3; LOC2 = isroutine_299323_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA4; LOC1 = ((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind294020) 23)); LA4: ; result0 = LOC1; return result0; } N_NIMCALL(void, genclosure_559836_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { { NIM_BOOL LOC3; Ropeobj180006* tmp0; Ropeobj180006* LOC6; TY537238 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = isconstclosure_559810_839829468(n0); if (!LOC3) goto LA4; (*(*p0).module).labels += ((NI) 1); LOC6 = (Ropeobj180006*)0; LOC6 = rope_180401_2381377266(((NI64) ((*(*p0).module).labels))); tmp0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_566), LOC6); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_537671_839829468((*p0).module, (*n0).typ); LOC7[1] = tmp0; LOC7[2] = genconstexpr_556849_839829468(p0, n0); addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3); putintodest_552468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc294812) 1)); } goto LA1; LA4: ; { Tloc294816 tmp0; Tloc294816 a0; Tloc294816 b0; TY537238 LOC14; memset((void*)(&tmp0), 0, sizeof(tmp0)); memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 
1)], (&b0)); { Tnode294802* LOC11; LOC11 = (Tnode294802*)0; LOC11 = skipconv_330882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]); if (!((*LOC11).kind == ((Tnodekind294020) 155))) goto LA12; internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567)); } LA12: ; gettemp_539032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_540188_839829468(tmp0); LOC14[1] = rdloc_540188_839829468(a0); LOC14[2] = rdloc_540188_839829468(b0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3); putlocintodest_541258_839829468(p0, d0, tmp0); } LA1: ; } static N_INLINE(Ropeobj180006*, assignlabel_546020_839829468)(Tblock531019* b0) { Ropeobj180006* result0; Ropeobj180006* LOC1; result0 = (Ropeobj180006*)0; LOC1 = (Ropeobj180006*)0; LOC1 = rope_180401_2381377266(((NI64) ((*b0).id))); unsureAsgnRef((void**) (&(*b0).label), HEX26_180452_2381377266(((NimStringDesc*) &T839829468_296), LOC1)); result0 = (*b0).label; return result0; } N_NIMCALL(void, gencomputedgoto_547744_839829468)(Tcproc531021* p0, Tnode294802* n0) { NI casepos0; NI arraysize0; NI id0; Ropeobj180006* tmp0; TY180507 LOC27; Ropeobj180006* gotoarray0; TY534811 LOC28; TY180507 LOC33; NI topblock0; Ropeobj180006* oldbody0; Ropeobj180006* tailb0; Ropeobj180006* taila0; Tnode294802* casestmt0; Tloc294816 a_547871_839829468; TY534811 LOC41; { casepos0 = ((NI) -1); arraysize0 = (NI)0; { NI i_547768_839829468; NI HEX3Atmp_547933_839829468; NI LOC2; NI res_547936_839829468; i_547768_839829468 = (NI)0; HEX3Atmp_547933_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_295081_850551059(n0); HEX3Atmp_547933_839829468 = (LOC2 - 1); res_547936_839829468 = ((NI) 0); { while (1) { Tnode294802* it0; if (!(res_547936_839829468 <= HEX3Atmp_547933_839829468)) goto LA4; i_547768_839829468 = res_547936_839829468; it0 = (*n0).kindU.S6.sons->data[i_547768_839829468]; { NI64 asize0; if (!((*it0).kind == ((Tnodekind294020) 97))) goto 
LA7; { Tnode294802* LOC11; LOC11 = (Tnode294802*)0; LOC11 = lastson_297364_850551059(it0); if (!!(((*LOC11).kind == ((Tnodekind294020) 85)))) goto LA12; localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570)); goto BeforeRet; } LA12: ; casepos0 = i_547768_839829468; asize0 = lengthord_322007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ); { if (!(IL64(10000) < asize0)) goto LA16; localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571)); goto BeforeRet; } LA16: ; arraysize0 = ((NI) (asize0)); { NI64 LOC20; LOC20 = (NI64)0; LOC20 = firstord_322001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ); if (!!((LOC20 == IL64(0)))) goto LA21; localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572)); goto BeforeRet; } LA21: ; } LA7: ; res_547936_839829468 += ((NI) 1); } LA4: ; } } { if (!(casepos0 < ((NI) 0))) goto LA25; localerror_198085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573)); goto BeforeRet; } LA25: ; id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1)); (*p0).labels += (NI)(arraysize0 + ((NI) 1)); memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = rope_180401_2381377266(((NI64) (id0))); tmp0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1); memset((void*)LOC28, 0, sizeof(LOC28)); LOC28[0] = tmp0; LOC28[1] = rope_180401_2381377266(((NI64) (arraysize0))); gotoarray0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2); { NI i_547819_839829468; NI HEX3Atmp_547941_839829468; NI res_547944_839829468; i_547819_839829468 = (NI)0; HEX3Atmp_547941_839829468 = (NI)0; HEX3Atmp_547941_839829468 = (NI)(arraysize0 - ((NI) 1)); res_547944_839829468 = ((NI) 1); { while (1) { TY180507 LOC32; if (!(res_547944_839829468 <= HEX3Atmp_547941_839829468)) goto LA31; i_547819_839829468 = res_547944_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = rope_180401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_547819_839829468)))); addf_181205_2381377266(&gotoarray0, 
((NimStringDesc*) &T839829468_576), LOC32, 1); res_547944_839829468 += ((NI) 1); } LA31: ; } } memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rope_180401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0)))); addf_181205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1); line_534690_839829468(p0, ((Tcprocsection531011) 0), gotoarray0); topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1)); oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]), NIM_NIL); { NI j_547854_839829468; NI HEX3Atmp_547949_839829468; NI HEX3Atmp_547950_839829468; NI LOC35; NI res_547953_839829468; j_547854_839829468 = (NI)0; HEX3Atmp_547949_839829468 = (NI)0; HEX3Atmp_547950_839829468 = (NI)0; HEX3Atmp_547949_839829468 = (NI)(casepos0 + ((NI) 1)); LOC35 = (NI)0; LOC35 = len_295081_850551059(n0); HEX3Atmp_547950_839829468 = (LOC35 - 1); res_547953_839829468 = HEX3Atmp_547949_839829468; { while (1) { if (!(res_547953_839829468 <= HEX3Atmp_547950_839829468)) goto LA37; j_547854_839829468 = res_547953_839829468; genstmts_541244_839829468(p0, (*n0).kindU.S6.sons->data[j_547854_839829468]); res_547953_839829468 += ((NI) 1); } LA37: ; } } tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]), NIM_NIL); { NI j_547866_839829468; NI HEX3Atmp_547958_839829468; NI res_547961_839829468; j_547866_839829468 = (NI)0; HEX3Atmp_547958_839829468 = (NI)0; HEX3Atmp_547958_839829468 = (NI)(casepos0 - ((NI) 1)); res_547961_839829468 = ((NI) 0); { while (1) { if (!(res_547961_839829468 <= HEX3Atmp_547958_839829468)) goto LA40; j_547866_839829468 = res_547961_839829468; genstmts_541244_839829468(p0, (*n0).kindU.S6.sons->data[j_547866_839829468]); res_547961_839829468 += ((NI) 1); } LA40: ; } } taila0 = 
(*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]), HEX26_180418_2381377266(oldbody0, taila0)); casestmt0 = (*n0).kindU.S6.sons->data[casepos0]; memset((void*)(&a_547871_839829468), 0, sizeof(a_547871_839829468)); initlocexpr_541283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_547871_839829468)); memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = tmp0; LOC41[1] = rdloc_540188_839829468(a_547871_839829468); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2); { NI i_547894_839829468; NI HEX3Atmp_547977_839829468; NI LOC43; NI res_547980_839829468; i_547894_839829468 = (NI)0; HEX3Atmp_547977_839829468 = (NI)0; LOC43 = (NI)0; LOC43 = len_295081_850551059(casestmt0); HEX3Atmp_547977_839829468 = (LOC43 - 1); res_547980_839829468 = ((NI) 1); { while (1) { TY535289 LOC46; NI LOC47; Tnode294802* it0; Tnode294802* LOC57; Ropeobj180006** LOC58; Ropeobj180006** LOC59; Tloc294816 a0; TY534811 LOC60; if (!(res_547980_839829468 <= HEX3Atmp_547977_839829468)) goto LA45; i_547894_839829468 = res_547980_839829468; memset((void*)LOC46, 0, sizeof(LOC46)); LOC47 = (NI)0; LOC47 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0); it0 = (*casestmt0).kindU.S6.sons->data[i_547894_839829468]; { NI j_547910_839829468; NI HEX3Atmp_547969_839829468; NI LOC49; NI res_547972_839829468; j_547910_839829468 = (NI)0; HEX3Atmp_547969_839829468 = (NI)0; LOC49 = (NI)0; LOC49 = len_295081_850551059(it0); HEX3Atmp_547969_839829468 = (NI)(LOC49 - ((NI) 2)); res_547972_839829468 = ((NI) 0); { while (1) { NI64 val0; TY180507 LOC56; if (!(res_547972_839829468 <= HEX3Atmp_547969_839829468)) goto LA51; j_547910_839829468 = res_547972_839829468; { if (!((*(*it0).kindU.S6.sons->data[j_547910_839829468]).kind == ((Tnodekind294020) 44))) goto LA54; localerror_198085_155036129((*it0).info, 
((NimStringDesc*) &T839829468_579)); goto BeforeRet; } LA54: ; val0 = getordvalue_322129_3876443242((*it0).kindU.S6.sons->data[j_547910_839829468]); memset((void*)LOC56, 0, sizeof(LOC56)); LOC56[0] = intliteral_541270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1))); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1); res_547972_839829468 += ((NI) 1); } LA51: ; } } LOC57 = (Tnode294802*)0; LOC57 = lastson_297364_850551059(it0); genstmts_541244_839829468(p0, LOC57); LOC58 = (Ropeobj180006**)0; LOC58 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180482_2381377266(LOC58, tailb0); LOC59 = (Ropeobj180006**)0; LOC59 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); add_180482_2381377266(LOC59, taila0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC60, 0, sizeof(LOC60)); LOC60[0] = tmp0; LOC60[1] = rdloc_540188_839829468(a0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2); endblock_546060_839829468(p0); res_547980_839829468 += ((NI) 1); } LA45: ; } } }BeforeRet: ; } N_NIMCALL(void, genwhilestmt_547984_839829468)(Tcproc531021* p0, Tnode294802* t0) { Tloc294816 a0; NI oldbreakidx_548011_839829468; TY535289 LOC1; Tnode294802* loopbody0; memset((void*)(&a0), 0, sizeof(a0)); (*p0).withinloop += ((NI) 1); genlinedir_534823_839829468(p0, t0); oldbreakidx_548011_839829468 = (*p0).breakidx; memset((void*)LOC1, 0, sizeof(LOC1)); (*p0).breakidx = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0); (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE; initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); { NIM_BOOL LOC4; Ropeobj180006* label0; TY534811 LOC8; LOC4 = (NIM_BOOL)0; LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 6))); if (LOC4) goto LA5; LOC4 = 
((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0)); LA5: ; if (!LOC4) goto LA6; label0 = assignlabel_546020_839829468((&(*p0).blocks->data[(*p0).breakidx])); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_540188_839829468(a0); LOC8[1] = label0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2); } LA6: ; loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)]; { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = stmtscontainpragma_530083_2036603609(loopbody0, ((Tspecialword277003) 182)); if (!(LOC11)) goto LA12; LOC11 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 1))&7U)))!=0); LA12: ; if (!LOC11) goto LA13; { NIM_BOOL LOC17; NI LOC18; LOC17 = (NIM_BOOL)0; LOC18 = (NI)0; LOC18 = len_295081_850551059(loopbody0); LOC17 = (LOC18 == ((NI) 2)); if (!(LOC17)) goto LA19; LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)); LA19: ; if (!LOC17) goto LA20; loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)]; } LA20: ; gencomputedgoto_547744_839829468(p0, loopbody0); } goto LA9; LA13: ; { genstmts_541244_839829468(p0, loopbody0); } LA9: ; { TY535289 LOC27; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 19))&31U)))!=0)) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0); } LA25: ; endblock_546060_839829468(p0); (*p0).breakidx = oldbreakidx_548011_839829468; (*p0).withinloop -= ((NI) 1); } N_NIMCALL(void, gengotovar_546258_839829468)(Tcproc531021* p0, Tnode294802* value0) { { if (!!(((*value0).kind >= ((Tnodekind294020) 5) && (*value0).kind <= ((Tnodekind294020) 15)))) goto LA3; localerror_198085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582)); } goto LA1; LA3: ; { TY180507 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_180401_2381377266((*value0).kindU.S1.intval); linef_534700_839829468(p0, 
((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_583), LOC6, 1); } LA1: ; } N_NIMCALL(void, varindynamiclib_540812_839829468)(Tcgen531027* m0, Tsym294834* sym0) { Tlib294820* lib0; Ropeobj180006* extname0; Ropeobj180006* tmp0; TY537235 LOC1; NimStringDesc* LOC2; TY534811 LOC3; lib0 = (*sym0).annex; extname0 = (*sym0).loc.r; loaddynamiclib_561480_839829468(m0, lib0); (*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag294810) 0))%(sizeof(NU16)*8)); tmp0 = mangledynlibproc_540816_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0); (*m0).labels += ((NI) 2); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = tmp0; LOC1[1] = gettypedesc_537671_839829468(m0, (*sym0).typ); LOC1[2] = (*lib0).name; LOC2 = (NimStringDesc*)0; LOC2 = HEX24_180856_2381377266(extname0); LOC1[3] = makecstring_193638_155036129(LOC2); appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = (*sym0).loc.r; LOC3[1] = gettypedesc_537671_839829468(m0, (*sym0).loc.t); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2); } N_NIMCALL(void, assignglobalvar_540819_839829468)(Tcproc531021* p0, Tsym294834* s0) { { { Ropeobj180006* LOC5; if (!((*s0).loc.k == ((Tlockind294808) 0))) goto LA3; LOC5 = (Ropeobj180006*)0; LOC5 = manglename_535205_839829468(s0); fillloc_534282_839829468((&(*s0).loc), ((Tlockind294808) 3), (*s0).typ, LOC5, ((Tstorageloc294812) 3)); } LA3: ; { Tcgen531027* q0; if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0)) goto LA8; q0 = findpendingmodule_534241_839829468((*p0).module, s0); { NIM_BOOL LOC12; NIM_BOOL LOC14; LOC12 = (NIM_BOOL)0; LOC12 = !((q0 == NIM_NIL)); if (!(LOC12)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_270862_2627731572((&(*q0).declaredthings), (*s0).Sup.id); LOC12 = !(LOC14); LA13: ; if (!LOC12) goto LA15; varindynamiclib_540812_839829468(q0, s0); } goto LA10; LA15: ; { 
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_540816_839829468(s0)); } LA10: ; goto BeforeRet; } LA8: ; useheader_534369_839829468((*p0).module, s0); { if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)) goto LA20; goto BeforeRet; } LA20: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 22))&31U)))!=0)) goto LA24; declarethreadvar_540676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0)); } goto LA22; LA24: ; { Ropeobj180006* decl0; Ropeobj180006* td0; decl0 = NIM_NIL; td0 = gettypedesc_537671_839829468((*p0).module, (*s0).loc.t); { TY180507 LOC43; if (!(*s0).constraint == 0) goto LA29; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0)) goto LA33; add_180487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240)); } LA33: ; add_180482_2381377266(&decl0, td0); { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 8))&31U)))!=0)) goto LA37; add_180487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121)); } LA37: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 7))&31U)))!=0)) goto LA41; add_180487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122)); } LA41: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = (*s0).loc.r; addf_181205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1); } goto LA27; LA29: ; { NimStringDesc* LOC45; TY534811 LOC46; LOC45 = (NimStringDesc*)0; LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3); appendString(LOC45, (*(*s0).constraint).kindU.S3.strval); appendString(LOC45, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC46, 0, sizeof(LOC46)); LOC46[0] = td0; LOC46[1] = (*s0).loc.r; decl0 = HEX25_180905_2381377266(LOC45, LOC46, 2); } LA27: ; add_180482_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 9))- 0], decl0); } LA22: ; { if (!(((NI) 0) < (*p0).withinloop)) goto LA49; resetloc_540350_839829468(p0, (&(*s0).loc)); } LA49: ; { TY537238 LOC55; NimStringDesc* LOC56; NimStringDesc* LOC57; if 
(!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC56 = (NimStringDesc*)0; LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1); appendString(LOC56, (*(*(*s0).owner).name).s); appendChar(LOC56, 46); appendString(LOC56, (*(*s0).name).s); LOC57 = (NimStringDesc*)0; LOC57 = nsuNormalize(LOC56); LOC55[0] = makecstring_193638_155036129(LOC57); LOC55[1] = (*s0).loc.r; LOC55[2] = gentypeinfo_537941_839829468((*p0).module, (*s0).typ); appcg_534632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection531005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3); } LA53: ; }BeforeRet: ; } N_NIMCALL(Ropeobj180006*, gentraverseprocforglobal_540032_839829468)(Tcgen531027* m0, Tsym294834* s0) { Ropeobj180006* result0; Ropeobj180006* LOC1; Ttraversalclosure539019 c0; Tcproc531021* p0; Ropeobj180006* sloc0; Ropeobj180006* header0; TY180507 LOC8; Ropeobj180006* generatedproc0; TY537235 LOC9; Ropeobj180006** LOC10; Ropeobj180006** LOC11; Ropeobj180006** LOC12; TY180507 LOC13; result0 = (Ropeobj180006*)0; LOC1 = (Ropeobj180006*)0; LOC1 = gentypeinfo_537941_839829468(m0, (*s0).loc.t); memset((void*)(&c0), 0, sizeof(c0)); p0 = newproc_531206_3723162438(NIM_NIL, m0); sloc0 = (*s0).loc.r; result0 = gettempname_535596_839829468(m0); { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag294184) 22))&31U)))!=0); if (!(LOC4)) goto LA5; LOC4 = emulatedthreadvars_534949_839829468(); LA5: ; if (!LOC4) goto LA6; accessthreadlocalvar_534945_839829468(p0, s0); sloc0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_288), sloc0); } LA6: ; c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587)); c0.p = p0; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = result0; header0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1); gentraverseproc_539022_839829468((&c0), sloc0, (*s0).loc.t); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = header0; 
LOC10 = (Ropeobj180006**)0; LOC10 = s_531179_3723162438(p0, ((Tcprocsection531011) 0)); LOC9[1] = (*LOC10); LOC11 = (Ropeobj180006**)0; LOC11 = s_531179_3723162438(p0, ((Tcprocsection531011) 1)); LOC9[2] = (*LOC11); LOC12 = (Ropeobj180006**)0; LOC12 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); LOC9[3] = (*LOC12); generatedproc0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = header0; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 10))- 0], generatedproc0); return result0; } N_NIMCALL(void, registergcroot_545762_839829468)(Tcproc531021* p0, Tsym294834* v0) { { NIM_BOOL LOC3; Ropeobj180006* prc0; Ropeobj180006** LOC7; TY180507 LOC8; LOC3 = (NIM_BOOL)0; LOC3 = ((240 &(1U<<((NU)(gselectedgc_171133_2607990831)&7U)))!=0); if (!(LOC3)) goto LA4; LOC3 = containsgarbagecollectedref_322117_3876443242((*v0).loc.t); LA4: ; if (!LOC3) goto LA5; prc0 = gentraverseprocforglobal_540032_839829468((*p0).module, v0); LOC7 = (Ropeobj180006**)0; LOC7 = procsec_531194_3723162438((*(*p0).module).initproc, ((Tcprocsection531011) 1)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = prc0; appcg_534632_839829468((*p0).module, LOC7, ((NimStringDesc*) &T839829468_589), LOC8, 1); } LA5: ; } static N_INLINE(NIM_BOOL, isassignedimmediately_545781_839829468)(Tnode294802* n0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!((*n0).kind == ((Tnodekind294020) 1))) goto LA3; result0 = NIM_FALSE; goto BeforeRet; } LA3: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = isinvalidreturntype_535548_839829468((*n0).typ); if (!LOC7) goto LA8; result0 = NIM_FALSE; goto BeforeRet; } LA8: ; result0 = NIM_TRUE; }BeforeRet: ; return result0; } N_NIMCALL(void, genasgncall_545695_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0) { { Ttype294840* LOC3; LOC3 = (Ttype294840*)0; LOC3 = 
skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, 2048); if (!((*LOC3).callconv == ((Tcallingconvention294002) 8))) goto LA4; genclosurecall_542452_839829468(p0, le0, ri0, d0); } goto LA1; LA4: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)); if (!(LOC7)) goto LA8; LOC7 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; geninfixcall_543929_839829468(p0, le0, ri0, d0); } goto LA1; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)); if (!(LOC12)) goto LA13; LOC12 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 28))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; gennamedparamcall_544616_839829468(p0, ri0, d0); } goto LA1; LA14: ; { genprefixcall_541960_839829468(p0, le0, ri0, d0); } LA1: ; poststmtactions_534942_839829468(p0); } static N_INLINE(void, loadinto_545928_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* a0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; LOC3 = (NIM_BOOL)0; LOC3 = ((*ri0).kind == ((Tnodekind294020) 27) || (*ri0).kind == ((Tnodekind294020) 29) || (*ri0).kind == ((Tnodekind294020) 30) || (*ri0).kind == ((Tnodekind294020) 31) || (*ri0).kind == ((Tnodekind294020) 26) || (*ri0).kind == ((Tnodekind294020) 28) || (*ri0).kind == ((Tnodekind294020) 32)); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = !(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3))); if (LOC5) goto LA6; LOC5 = ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic294524) 0)); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; genasgncall_545695_839829468(p0, le0, ri0, a0); } goto LA1; LA7: ; { if (!((*ri0).kind == ((Tnodekind294020) 47) || (*ri0).kind == ((Tnodekind294020) 65))) goto LA10; genderef_545921_839829468(p0, ri0, a0, 
NIM_TRUE); } goto LA1; LA10: ; { expr_541248_839829468(p0, ri0, a0); } LA1: ; } N_NIMCALL(void, gensinglevar_546276_839829468)(Tcproc531021* p0, Tnode294802* a0) { Tsym294834* v0; Tcproc531021* targetproc0; { v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 30))&31U)))!=0)) goto LA7; gengotovar_546258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]); } LA7: ; goto BeforeRet; } LA3: ; targetproc0 = p0; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 3))&31U)))!=0)) goto LA11; { NIM_BOOL LOC15; NIM_BOOL LOC16; LOC15 = (NIM_BOOL)0; LOC16 = (NIM_BOOL)0; LOC16 = (((*v0).flags & 96) == 32); if (!(LOC16)) goto LA17; LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 1)); LA17: ; LOC15 = LOC16; if (!(LOC15)) goto LA18; LOC15 = !((((*v0).loc.flags & 72) == 0)); LA18: ; if (!LOC15) goto LA19; goto BeforeRet; } LA19: ; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0)) goto LA23; targetproc0 = (*(*p0).module).preinitproc; } LA23: ; assignglobalvar_540819_839829468(targetproc0, v0); genobjectinit_540242_839829468((*(*p0).module).preinitproc, ((Tcprocsection531011) 1), (*v0).typ, (*v0).loc, NIM_TRUE); { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag294184) 6))&31U)))!=0); if (!(LOC27)) goto LA28; LOC27 = !((generatedheader_534201_839829468 == NIM_NIL)); LA28: ; if (!LOC27) goto LA29; genvarprototypeaux_546254_839829468(generatedheader_534201_839829468, v0); } LA29: ; registergcroot_545762_839829468(p0, v0); } goto LA9; LA11: ; { Tnode294802* value0; NIM_BOOL imm0; value0 = (*a0).kindU.S6.sons->data[((NI) 2)]; imm0 = isassignedimmediately_545781_839829468(value0); { NIM_BOOL LOC34; NIM_BOOL LOC35; NIM_BOOL LOC36; NIM_BOOL LOC38; NIM_BOOL LOC42; Ropeobj180006* decl0; Tloc294816 tmp0; LOC34 = (NIM_BOOL)0; LOC35 = (NIM_BOOL)0; LOC36 = (NIM_BOOL)0; LOC36 = imm0; if (!(LOC36)) goto LA37; 
LOC38 = (NIM_BOOL)0; LOC38 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC38) goto LA39; LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA39: ; LOC36 = LOC38; LA37: ; LOC35 = LOC36; if (!(LOC35)) goto LA40; LOC35 = ((*p0).splitdecls == ((NI) 0)); LA40: ; LOC34 = LOC35; if (!(LOC34)) goto LA41; LOC42 = (NIM_BOOL)0; LOC42 = containshiddenpointer_322120_3876443242((*v0).typ); LOC34 = !(LOC42); LA41: ; if (!LOC34) goto LA43; genlinedir_534823_839829468(p0, a0); decl0 = localvardecl_540532_839829468(p0, v0); memset((void*)(&tmp0), 0, sizeof(tmp0)); { NIM_BOOL LOC47; NIM_BOOL LOC48; Tnode294802* LOC50; Tnode294802* LOC52; Ropeobj180006* params0; Ttype294840* typ0; TY534811 LOC66; LOC47 = (NIM_BOOL)0; LOC48 = (NIM_BOOL)0; LOC48 = ((*value0).kind == ((Tnodekind294020) 27) || (*value0).kind == ((Tnodekind294020) 29) || (*value0).kind == ((Tnodekind294020) 30) || (*value0).kind == ((Tnodekind294020) 31) || (*value0).kind == ((Tnodekind294020) 26) || (*value0).kind == ((Tnodekind294020) 28) || (*value0).kind == ((Tnodekind294020) 32)); if (!(LOC48)) goto LA49; LOC50 = (Tnode294802*)0; LOC50 = HEX5BHEX5D_295238_850551059(value0, ((NI) 0)); LOC48 = ((*LOC50).kind == ((Tnodekind294020) 3)); LA49: ; LOC47 = LOC48; if (!(LOC47)) goto LA51; LOC52 = (Tnode294802*)0; LOC52 = HEX5BHEX5D_295238_850551059(value0, ((NI) 0)); LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 24))&31U)))!=0); LA51: ; if (!LOC47) goto LA53; params0 = (Ropeobj180006*)0; typ0 = skiptypes_298099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { NI i_546619_839829468; NI HEX3Atmp_546825_839829468; NI LOC56; NI res_546828_839829468; i_546619_839829468 = (NI)0; HEX3Atmp_546825_839829468 = (NI)0; LOC56 = (NI)0; LOC56 = len_295081_850551059(value0); HEX3Atmp_546825_839829468 = (LOC56 - 1); res_546828_839829468 = ((NI) 1); { while (1) { Ropeobj180006* LOC65; if (!(res_546828_839829468 <= 
HEX3Atmp_546825_839829468)) goto LA58; i_546619_839829468 = res_546828_839829468; { TY535289 LOC63; Ropeobj180006* LOC64; if (!!((params0 == NIM_NIL))) goto LA61; memset((void*)LOC63, 0, sizeof(LOC63)); LOC64 = (Ropeobj180006*)0; LOC64 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0); add_180482_2381377266(&params0, LOC64); } LA61: ; LOC65 = (Ropeobj180006*)0; LOC65 = genotherarg_541277_839829468(p0, value0, i_546619_839829468, typ0); add_180482_2381377266(&params0, LOC65); res_546828_839829468 += ((NI) 1); } LA58: ; } } memset((void*)LOC66, 0, sizeof(LOC66)); LOC66[0] = decl0; LOC66[1] = params0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2); } goto LA45; LA53: ; { TY534811 LOC68; initlocexprsingleuse_541289_839829468(p0, value0, (&tmp0)); memset((void*)LOC68, 0, sizeof(LOC68)); LOC68[0] = decl0; LOC68[1] = rdloc_540188_839829468(tmp0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2); } LA45: ; goto BeforeRet; } LA43: ; assignlocalvar_540614_839829468(p0, v0); initlocalvar_540398_839829468(p0, v0, imm0); } LA9: ; { if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 1)))) goto LA71; genlinedir_534823_839829468(targetproc0, a0); loadinto_545928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc)); } LA71: ; }BeforeRet: ; } N_NIMCALL(void, genclosurevar_546832_839829468)(Tcproc531021* p0, Tnode294802* a0) { NIM_BOOL immediateasgn0; immediateasgn0 = !(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 1))); { Tloc294816 v0; if (!immediateasgn0) goto LA3; memset((void*)(&v0), 0, sizeof(v0)); initlocexpr_541283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&v0)); genlinedir_534823_839829468(p0, a0); loadinto_545928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&v0)); } LA3: ; } N_NIMCALL(void, 
genvartuple_545794_839829468)(Tcproc531021* p0, Tnode294802* n0) { Tloc294816 tup0; Tloc294816 field0; NI L0; NIM_BOOL uselowering0; Ttype294840* t0; { memset((void*)(&tup0), 0, sizeof(tup0)); memset((void*)(&field0), 0, sizeof(field0)); { if (!!(((*n0).kind == ((Tnodekind294020) 36)))) goto LA3; internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592)); } LA3: ; L0 = sonslen_297351_850551059(n0); uselowering0 = NIM_FALSE; { NI i_545822_839829468; NI HEX3Atmp_545905_839829468; NI res_545908_839829468; i_545822_839829468 = (NI)0; HEX3Atmp_545905_839829468 = (NI)0; HEX3Atmp_545905_839829468 = (NI)(L0 - ((NI) 3)); res_545908_839829468 = ((NI) 0); { while (1) { if (!(res_545908_839829468 <= HEX3Atmp_545905_839829468)) goto LA7; i_545822_839829468 = res_545908_839829468; { Tnode294802* LOC10; LOC10 = (Tnode294802*)0; LOC10 = HEX5BHEX5D_295238_850551059(n0, i_545822_839829468); if (!!(((*LOC10).kind == ((Tnodekind294020) 3)))) goto LA11; uselowering0 = NIM_TRUE; goto LA5; } LA11: ; res_545908_839829468 += ((NI) 1); } LA7: ; } } LA5: ; { Tnode294802* LOC17; if (!uselowering0) goto LA15; LOC17 = (Tnode294802*)0; LOC17 = lowertupleunpacking_435037_2218250499(n0, (*p0).prc); genstmts_541244_839829468(p0, LOC17); goto BeforeRet; } LA15: ; genlinedir_534823_839829468(p0, n0); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0)); t0 = getuniquetype_530640_2036603609(tup0.t); { NI i_545846_839829468; NI HEX3Atmp_545914_839829468; NI res_545917_839829468; i_545846_839829468 = (NI)0; HEX3Atmp_545914_839829468 = (NI)0; HEX3Atmp_545914_839829468 = (NI)(L0 - ((NI) 3)); res_545917_839829468 = ((NI) 0); { while (1) { if (!(res_545917_839829468 <= HEX3Atmp_545914_839829468)) goto LA20; i_545846_839829468 = res_545917_839829468; { Tsym294834* v0; v0 = (*(*n0).kindU.S6.sons->data[i_545846_839829468]).kindU.S4.sym; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 23))&31U)))!=0)) goto LA24; goto LA21; } LA24: ; { if 
(!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 3))&31U)))!=0)) goto LA28; assignglobalvar_540819_839829468(p0, v0); genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 1), (*v0).typ, (*v0).loc, NIM_TRUE); registergcroot_545762_839829468(p0, v0); } goto LA26; LA28: ; { Tnode294802* LOC31; NIM_BOOL LOC32; assignlocalvar_540614_839829468(p0, v0); LOC31 = (Tnode294802*)0; LOC31 = HEX5BHEX5D_295238_850551059(n0, (NI)(L0 - ((NI) 1))); LOC32 = (NIM_BOOL)0; LOC32 = isassignedimmediately_545781_839829468(LOC31); initlocalvar_540398_839829468(p0, v0, LOC32); } LA26: ; initloc_534273_839829468((&field0), ((Tlockind294808) 6), (*t0).sons->data[i_545846_839829468], tup0.s); { TY534811 LOC37; if (!((*t0).kind == ((Ttypekind294244) 18))) goto LA35; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = rdloc_540188_839829468(tup0); LOC37[1] = rope_180401_2381377266(((NI64) (i_545846_839829468))); field0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2); } goto LA33; LA35: ; { TY534811 LOC43; { if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_545846_839829468]).kind == ((Tnodekind294020) 3)))) goto LA41; internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592)); } LA41: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = rdloc_540188_839829468(tup0); LOC43[1] = manglerecfieldname_536361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_545846_839829468]).kindU.S4.sym, t0); field0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2); } LA33: ; putlocintodest_541258_839829468(p0, (&(*v0).loc), field0); } LA21: ; res_545917_839829468 += ((NI) 1); } LA20: ; } } }BeforeRet: ; } N_NIMCALL(void, genvarstmt_546854_839829468)(Tcproc531021* p0, Tnode294802* n0) { { NI i_546869_839829468; NI HEX3Atmp_546902_839829468; NI LOC2; NI res_546905_839829468; i_546869_839829468 = (NI)0; HEX3Atmp_546902_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(n0); HEX3Atmp_546902_839829468 = (NI)(LOC2 - ((NI) 1)); res_546905_839829468 = 
((NI) 0); { while (1) { if (!(res_546905_839829468 <= HEX3Atmp_546902_839829468)) goto LA4; i_546869_839829468 = res_546905_839829468; { Tnode294802* a0; a0 = (*n0).kindU.S6.sons->data[i_546869_839829468]; { if (!((*a0).kind == ((Tnodekind294020) 125))) goto LA8; goto LA5; } LA8: ; { if (!((*a0).kind == ((Tnodekind294020) 35))) goto LA12; { if (!((*(*a0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3))) goto LA16; gensinglevar_546276_839829468(p0, a0); } goto LA14; LA16: ; { genclosurevar_546832_839829468(p0, a0); } LA14: ; } goto LA10; LA12: ; { genvartuple_545794_839829468(p0, a0); } LA10: ; } LA5: ; res_546905_839829468 += ((NI) 1); } LA4: ; } } } static N_INLINE(NIM_BOOL, emitlazily_534248_839829468)(Tsym294834* s0) { NIM_BOOL result0; NIM_BOOL LOC1; Tsym294834* LOC3; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 2))&63U)))!=0); if (LOC1) goto LA2; LOC3 = (Tsym294834*)0; LOC3 = getmodule_301123_2984716966(s0); LOC1 = (((*LOC3).flags &(1U<<((NU)(((Tsymflag294184) 25))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, genconststmt_546909_839829468)(Tcproc531021* p0, Tnode294802* t0) { { NI i_546924_839829468; NI HEX3Atmp_546975_839829468; NI LOC2; NI res_546978_839829468; i_546924_839829468 = (NI)0; HEX3Atmp_546975_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(t0); HEX3Atmp_546975_839829468 = (NI)(LOC2 - ((NI) 1)); res_546978_839829468 = ((NI) 0); { while (1) { if (!(res_546978_839829468 <= HEX3Atmp_546975_839829468)) goto LA4; i_546924_839829468 = res_546978_839829468; { Tnode294802* it0; Tsym294834* c0; it0 = (*t0).kindU.S6.sons->data[i_546924_839829468]; { if (!((*it0).kind == ((Tnodekind294020) 125))) goto LA8; goto LA5; } LA8: ; { if (!!(((*it0).kind == ((Tnodekind294020) 102)))) goto LA12; internalerror_198100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593)); } LA12: ; c0 = (*(*it0).kindU.S6.sons->data[((NI) 
0)]).kindU.S4.sym; { NIM_BOOL LOC16; LOC16 = (NIM_BOOL)0; LOC16 = containscompiletimeonly_330721_3876443242((*c0).typ); if (!LOC16) goto LA17; goto LA5; } goto LA14; LA17: ; { NIM_BOOL LOC20; NIM_BOOL LOC21; NI LOC24; LOC20 = (NIM_BOOL)0; LOC21 = (NIM_BOOL)0; LOC21 = ((*(*c0).typ).kind == ((Ttypekind294244) 4) || (*(*c0).typ).kind == ((Ttypekind294244) 16) || (*(*c0).typ).kind == ((Ttypekind294244) 19) || (*(*c0).typ).kind == ((Ttypekind294244) 18) || (*(*c0).typ).kind == ((Ttypekind294244) 24)); if (!(LOC21)) goto LA22; LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)); LA22: ; LOC20 = LOC21; if (!(LOC20)) goto LA23; LOC24 = (NI)0; LOC24 = len_295081_850551059((*c0).ast); LOC20 = !((LOC24 == ((NI) 0))); LA23: ; if (!LOC20) goto LA25; { NIM_BOOL LOC29; LOC29 = (NIM_BOOL)0; LOC29 = emitlazily_534248_839829468(c0); if (!!(LOC29)) goto LA30; requestconstimpl_541240_839829468(p0, c0); } LA30: ; } goto LA14; LA25: ; LA14: ; } LA5: ; res_546978_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, gencasestringbranch_549100_839829468)(Tcproc531021* p0, Tnode294802* b0, Tloc294816 e0, Ropeobj180006* labl0, Ropeobj180006** branches0, NI branches0Len0) { Tloc294816 x0; NI length0; memset((void*)(&x0), 0, sizeof(x0)); length0 = sonslen_297351_850551059(b0); { NI i_549122_839829468; NI HEX3Atmp_549409_839829468; NI res_549412_839829468; i_549122_839829468 = (NI)0; HEX3Atmp_549409_839829468 = (NI)0; HEX3Atmp_549409_839829468 = (NI)(length0 - ((NI) 2)); res_549412_839829468 = ((NI) 0); { while (1) { NI j0; NI64 LOC4; TY537238 LOC5; if (!(res_549412_839829468 <= HEX3Atmp_549409_839829468)) goto LA3; i_549122_839829468 = res_549412_839829468; initlocexpr_541283_839829468(p0, (*b0).kindU.S6.sons->data[i_549122_839829468], (&x0)); LOC4 = (NI64)0; LOC4 = hashstring_530100_2036603609((*(*b0).kindU.S6.sons->data[i_549122_839829468]).kindU.S3.strval); j0 = ((NI) ((NI64)(LOC4 & ((NI64) ((branches0Len0-1)))))); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = 
rdloc_540188_839829468(e0); LOC5[1] = rdloc_540188_839829468(x0); LOC5[2] = labl0; appcg_534632_839829468((*p0).module, &branches0[j0], ((NimStringDesc*) &T839829468_595), LOC5, 3); res_549412_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, exprblock_546103_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { TY535289 LOC1; NI LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0); expr_541248_839829468(p0, n0, d0); endblock_546060_839829468(p0); } N_NIMCALL(Ropeobj180006*, gencasesecondpass_548965_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NI labid0, NI until0) { Ropeobj180006* result0; Ropeobj180006* lend0; result0 = (Ropeobj180006*)0; lend0 = getlabel_541217_839829468(p0); { NI i_548984_839829468; NI res_549017_839829468; i_548984_839829468 = (NI)0; res_549017_839829468 = ((NI) 1); { while (1) { TY180507 LOC10; if (!(res_549017_839829468 <= until0)) goto LA3; i_548984_839829468 = res_549017_839829468; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = ((*d0).k == ((Tlockind294808) 1)); if (!(LOC6)) goto LA7; LOC6 = isemptytype_299440_850551059((*t0).typ); LA7: ; if (!LOC6) goto LA8; (*d0).k = ((Tlockind294808) 0); } LA8: ; memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rope_180401_2381377266(((NI64) ((NI)(labid0 + i_548984_839829468)))); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1); { NI length0; TY180507 LOC15; if (!((*(*t0).kindU.S6.sons->data[i_548984_839829468]).kind == ((Tnodekind294020) 85))) goto LA13; length0 = sonslen_297351_850551059((*t0).kindU.S6.sons->data[i_548984_839829468]); exprblock_546103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_548984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = lend0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1); } goto LA11; 
LA13: ; { exprblock_546103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_548984_839829468]).kindU.S6.sons->data[((NI) 0)], d0); } LA11: ; res_549017_839829468 += ((NI) 1); } LA3: ; } } result0 = lend0; return result0; } N_NIMCALL(void, gencasegenericbranch_548910_839829468)(Tcproc531021* p0, Tnode294802* b0, Tloc294816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj180006* labl0) { Tloc294816 x0; Tloc294816 y0; NI length0; memset((void*)(&x0), 0, sizeof(x0)); memset((void*)(&y0), 0, sizeof(y0)); length0 = sonslen_297351_850551059(b0); { NI i_548932_839829468; NI HEX3Atmp_548958_839829468; NI res_548961_839829468; i_548932_839829468 = (NI)0; HEX3Atmp_548958_839829468 = (NI)0; HEX3Atmp_548958_839829468 = (NI)(length0 - ((NI) 2)); res_548961_839829468 = ((NI) 0); { while (1) { if (!(res_548961_839829468 <= HEX3Atmp_548958_839829468)) goto LA3; i_548932_839829468 = res_548961_839829468; { TY537235 LOC8; if (!((*(*b0).kindU.S6.sons->data[i_548932_839829468]).kind == ((Tnodekind294020) 44))) goto LA6; initlocexpr_541283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_548932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0)); initlocexpr_541283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_548932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdcharloc_540227_839829468(e0); LOC8[1] = rdcharloc_540227_839829468(x0); LOC8[2] = rdcharloc_540227_839829468(y0); LOC8[3] = labl0; linecg_534707_839829468(p0, ((Tcprocsection531011) 2), rangeformat0, LOC8, 4); } goto LA4; LA6: ; { TY537238 LOC10; initlocexpr_541283_839829468(p0, (*b0).kindU.S6.sons->data[i_548932_839829468], (&x0)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdcharloc_540227_839829468(e0); LOC10[1] = rdcharloc_540227_839829468(x0); LOC10[2] = labl0; linecg_534707_839829468(p0, ((Tcprocsection531011) 2), eqformat0, LOC10, 3); } LA4: ; res_548961_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(Ropeobj180006*, 
genifforcaseuntil_549021_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc294816 a0) { Ropeobj180006* result0; NI labid0; result0 = (Ropeobj180006*)0; labid0 = (*p0).labels; { NI i_549042_839829468; NI res_549083_839829468; i_549042_839829468 = (NI)0; res_549083_839829468 = ((NI) 1); { while (1) { if (!(res_549083_839829468 <= until0)) goto LA3; i_549042_839829468 = res_549083_839829468; (*p0).labels += ((NI) 1); { Ropeobj180006* LOC8; Ropeobj180006* LOC9; if (!((*(*t0).kindU.S6.sons->data[i_549042_839829468]).kind == ((Tnodekind294020) 85))) goto LA6; LOC8 = (Ropeobj180006*)0; LOC8 = rope_180401_2381377266(((NI64) ((*p0).labels))); LOC9 = (Ropeobj180006*)0; LOC9 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_296), LOC8); gencasegenericbranch_548910_839829468(p0, (*t0).kindU.S6.sons->data[i_549042_839829468], a0, rangeformat0, eqformat0, LOC9); } goto LA4; LA6: ; { TY180507 LOC11; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rope_180401_2381377266(((NI64) ((*p0).labels))); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1); } LA4: ; res_549083_839829468 += ((NI) 1); } LA3: ; } } { NI LOC14; NI gototarget0; TY180507 LOC17; TY180507 LOC18; LOC14 = (NI)0; LOC14 = len_295081_850551059(t0); if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15; (*p0).labels += ((NI) 1); gototarget0 = (*p0).labels; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rope_180401_2381377266(((NI64) (gototarget0))); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1); result0 = gencasesecondpass_548965_839829468(p0, t0, d0, ((NI) (labid0)), until0); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rope_180401_2381377266(((NI64) (gototarget0))); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1); } goto LA12; LA15: ; { result0 = 
gencasesecondpass_548965_839829468(p0, t0, d0, ((NI) (labid0)), until0); } LA12: ; return result0; } N_NIMCALL(void, gencasegeneric_549087_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) { Tloc294816 a0; Ropeobj180006* lend0; NI LOC1; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (NI)0; LOC1 = sonslen_297351_850551059(t0); lend0 = genifforcaseuntil_549021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(LOC1 - ((NI) 1)), a0); fixlabel_541230_839829468(p0, lend0); } N_NIMCALL(void, genstringcase_549416_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) { NI strings0; strings0 = ((NI) 0); { NI i_549434_839829468; NI HEX3Atmp_549549_839829468; NI LOC2; NI res_549552_839829468; i_549434_839829468 = (NI)0; HEX3Atmp_549549_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(t0); HEX3Atmp_549549_839829468 = (NI)(LOC2 - ((NI) 1)); res_549552_839829468 = ((NI) 1); { while (1) { if (!(res_549552_839829468 <= HEX3Atmp_549549_839829468)) goto LA4; i_549434_839829468 = res_549552_839829468; { NI LOC9; if (!((*(*t0).kindU.S6.sons->data[i_549434_839829468]).kind == ((Tnodekind294020) 85))) goto LA7; LOC9 = (NI)0; LOC9 = sonslen_297351_850551059((*t0).kindU.S6.sons->data[i_549434_839829468]); strings0 += (NI)(LOC9 - ((NI) 1)); } LA7: ; res_549552_839829468 += ((NI) 1); } LA4: ; } } { NI bitmask0; NI LOC14; TY193350* branches0; Tloc294816 a0; NI labid0; TY534811 LOC26; TY535289 LOC35; Ropeobj180006* lend0; NI LOC42; if (!(((NI) 8) < strings0)) goto LA12; LOC14 = (NI)0; LOC14 = nextpoweroftwo_101629_1009420244(strings0); bitmask0 = (NI)(LOC14 - ((NI) 1)); branches0 = (TY193350*)0; branches0 = (TY193350*) newSeq((&NTI193350), ((NI) ((NI)(bitmask0 + ((NI) 1))))); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); labid0 = (*p0).labels; { NI 
i_549483_839829468; NI HEX3Atmp_549559_839829468; NI LOC16; NI res_549562_839829468; i_549483_839829468 = (NI)0; HEX3Atmp_549559_839829468 = (NI)0; LOC16 = (NI)0; LOC16 = sonslen_297351_850551059(t0); HEX3Atmp_549559_839829468 = (NI)(LOC16 - ((NI) 1)); res_549562_839829468 = ((NI) 1); { while (1) { if (!(res_549562_839829468 <= HEX3Atmp_549559_839829468)) goto LA18; i_549483_839829468 = res_549562_839829468; (*p0).labels += ((NI) 1); { Ropeobj180006* LOC23; Ropeobj180006* LOC24; if (!((*(*t0).kindU.S6.sons->data[i_549483_839829468]).kind == ((Tnodekind294020) 85))) goto LA21; LOC23 = (Ropeobj180006*)0; LOC23 = rope_180401_2381377266(((NI64) ((*p0).labels))); LOC24 = (Ropeobj180006*)0; LOC24 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_296), LOC23); gencasestringbranch_549100_839829468(p0, (*t0).kindU.S6.sons->data[i_549483_839829468], a0, LOC24, branches0->data, branches0->Sup.len); } goto LA19; LA21: ; { } LA19: ; res_549562_839829468 += ((NI) 1); } LA18: ; } } memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rdloc_540188_839829468(a0); LOC26[1] = rope_180401_2381377266(((NI64) (bitmask0))); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2); { NI j_549517_839829468; NI HEX3Atmp_549567_839829468; NI res_549570_839829468; j_549517_839829468 = (NI)0; HEX3Atmp_549567_839829468 = (NI)0; HEX3Atmp_549567_839829468 = (branches0 ? 
(branches0->Sup.len-1) : -1); res_549570_839829468 = ((NI) 0); { while (1) { if (!(res_549570_839829468 <= HEX3Atmp_549567_839829468)) goto LA29; j_549517_839829468 = res_549570_839829468; { TY534811 LOC34; if (!!((branches0->data[j_549517_839829468] == NIM_NIL))) goto LA32; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = intliteral_541270_839829468(((NI64) (j_549517_839829468))); LOC34[1] = branches0->data[j_549517_839829468]; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2); } LA32: ; res_549570_839829468 += ((NI) 1); } LA29: ; } } memset((void*)LOC35, 0, sizeof(LOC35)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0); { NI LOC38; TY180507 LOC41; LOC38 = (NI)0; LOC38 = sonslen_297351_850551059(t0); if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind294020) 85)))) goto LA39; memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = rope_180401_2381377266(((NI64) ((*p0).labels))); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1); } LA39: ; LOC42 = (NI)0; LOC42 = sonslen_297351_850551059(t0); lend0 = gencasesecondpass_548965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1))); fixlabel_541230_839829468(p0, lend0); } goto LA10; LA12: ; { gencasegeneric_549087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595)); } LA10: ; } N_NIMCALL(void, gengotoforcase_547673_839829468)(Tcproc531021* p0, Tnode294802* casestmt0) { { { NI i_547695_839829468; NI HEX3Atmp_547737_839829468; NI LOC2; NI res_547740_839829468; i_547695_839829468 = (NI)0; HEX3Atmp_547737_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_295081_850551059(casestmt0); HEX3Atmp_547737_839829468 = (LOC2 - 1); res_547740_839829468 = ((NI) 1); { while (1) { TY535289 LOC5; NI LOC6; Tnode294802* it0; Tnode294802* LOC16; if (!(res_547740_839829468 <= HEX3Atmp_547737_839829468)) goto LA4; 
i_547695_839829468 = res_547740_839829468; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (NI)0; LOC6 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0); it0 = (*casestmt0).kindU.S6.sons->data[i_547695_839829468]; { NI j_547711_839829468; NI HEX3Atmp_547730_839829468; NI LOC8; NI res_547733_839829468; j_547711_839829468 = (NI)0; HEX3Atmp_547730_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = len_295081_850551059(it0); HEX3Atmp_547730_839829468 = (NI)(LOC8 - ((NI) 2)); res_547733_839829468 = ((NI) 0); { while (1) { NI64 val0; TY180507 LOC15; if (!(res_547733_839829468 <= HEX3Atmp_547730_839829468)) goto LA10; j_547711_839829468 = res_547733_839829468; { if (!((*(*it0).kindU.S6.sons->data[j_547711_839829468]).kind == ((Tnodekind294020) 44))) goto LA13; localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579)); goto BeforeRet; } LA13: ; val0 = getordvalue_322129_3876443242((*it0).kindU.S6.sons->data[j_547711_839829468]); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = rope_180401_2381377266(val0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1); res_547733_839829468 += ((NI) 1); } LA10: ; } } LOC16 = (Tnode294802*)0; LOC16 = lastson_297364_850551059(it0); genstmts_541244_839829468(p0, LOC16); endblock_546060_839829468(p0); res_547740_839829468 += ((NI) 1); } LA4: ; } } }BeforeRet: ; } N_NIMCALL(NIM_BOOL, branchhastoobigrange_549575_839829468)(Tnode294802* b0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { NI i_549590_839829468; NI HEX3Atmp_549608_839829468; NI LOC2; NI res_549611_839829468; i_549590_839829468 = (NI)0; HEX3Atmp_549608_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(b0); HEX3Atmp_549608_839829468 = (NI)(LOC2 - ((NI) 2)); res_549611_839829468 = ((NI) 0); { while (1) { if (!(res_549611_839829468 <= HEX3Atmp_549608_839829468)) goto LA4; i_549590_839829468 = res_549611_839829468; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = 
((*(*b0).kindU.S6.sons->data[i_549590_839829468]).kind == ((Tnodekind294020) 44)); if (!(LOC7)) goto LA8; LOC7 = (IL64(256) < (NI64)((*(*(*b0).kindU.S6.sons->data[i_549590_839829468]).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*(*b0).kindU.S6.sons->data[i_549590_839829468]).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval)); LA8: ; if (!LOC7) goto LA9; result0 = NIM_TRUE; goto BeforeRet; } LA9: ; res_549611_839829468 += ((NI) 1); } LA4: ; } } }BeforeRet: ; return result0; } N_NIMCALL(NI, ifswitchsplitpoint_549615_839829468)(Tcproc531021* p0, Tnode294802* n0) { NI result0; result0 = (NI)0; { NI i_549630_839829468; NI HEX3Atmp_549654_839829468; NI LOC2; NI res_549657_839829468; i_549630_839829468 = (NI)0; HEX3Atmp_549654_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_295081_850551059(n0); HEX3Atmp_549654_839829468 = (NI)(LOC2 - ((NI) 1)); res_549657_839829468 = ((NI) 1); { while (1) { Tnode294802* branch0; Tnode294802* stmtblock0; if (!(res_549657_839829468 <= HEX3Atmp_549654_839829468)) goto LA4; i_549630_839829468 = res_549657_839829468; branch0 = HEX5BHEX5D_295238_850551059(n0, i_549630_839829468); stmtblock0 = lastson_297364_850551059(branch0); { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = stmtscontainpragma_530083_2036603609(stmtblock0, ((Tspecialword277003) 181)); if (!LOC7) goto LA8; result0 = i_549630_839829468; } goto LA5; LA8: ; { if (!!(((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 0))&7U)))!=0))) goto LA11; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = ((*branch0).kind == ((Tnodekind294020) 85)); if (!(LOC15)) goto LA16; LOC15 = branchhastoobigrange_549575_839829468(branch0); LA16: ; if (!LOC15) goto LA17; result0 = i_549630_839829468; } LA17: ; } goto LA5; LA11: ; LA5: ; res_549657_839829468 += ((NI) 1); } LA4: ; } } return result0; } N_NIMCALL(void, genordinalcase_549724_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { NI splitpoint0; Tloc294816 a0; Ropeobj180006* lend0; splitpoint0 = 
ifswitchsplitpoint_549615_839829468(p0, n0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); { if (!(((NI) 0) < splitpoint0)) goto LA3; lend0 = genifforcaseuntil_549021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, a0); } goto LA1; LA3: ; { lend0 = NIM_NIL; } LA1: ; { NI LOC8; TY180507 LOC11; NIM_BOOL hasdefault0; TY535289 LOC37; LOC8 = (NI)0; LOC8 = len_295081_850551059(n0); if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdcharloc_540227_839829468(a0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1); hasdefault0 = NIM_FALSE; { NI i_549757_839829468; NI HEX3Atmp_549816_839829468; NI HEX3Atmp_549817_839829468; NI LOC13; NI res_549820_839829468; i_549757_839829468 = (NI)0; HEX3Atmp_549816_839829468 = (NI)0; HEX3Atmp_549817_839829468 = (NI)0; HEX3Atmp_549816_839829468 = (NI)(splitpoint0 + ((NI) 1)); LOC13 = (NI)0; LOC13 = len_295081_850551059(n0); HEX3Atmp_549817_839829468 = (LOC13 - 1); res_549820_839829468 = HEX3Atmp_549816_839829468; { while (1) { Tnode294802* branch0; Tnode294802* LOC28; TY535289 LOC29; if (!(res_549820_839829468 <= HEX3Atmp_549817_839829468)) goto LA15; i_549757_839829468 = res_549820_839829468; { NIM_BOOL LOC18; LOC18 = (NIM_BOOL)0; LOC18 = ((*d0).k == ((Tlockind294808) 1)); if (!(LOC18)) goto LA19; LOC18 = isemptytype_299440_850551059((*n0).typ); LA19: ; if (!LOC18) goto LA20; (*d0).k = ((Tlockind294808) 0); } LA20: ; branch0 = HEX5BHEX5D_295238_850551059(n0, i_549757_839829468); { if (!((*branch0).kind == ((Tnodekind294020) 85))) goto LA24; gencaserange_539028_839829468(p0, branch0); } goto LA22; LA24: ; { TY535289 LOC27; memset((void*)LOC27, 0, sizeof(LOC27)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0); hasdefault0 = NIM_TRUE; } LA22: ; LOC28 = 
(Tnode294802*)0; LOC28 = lastson_297364_850551059(branch0); exprblock_546103_839829468(p0, LOC28, d0); memset((void*)LOC29, 0, sizeof(LOC29)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0); res_549820_839829468 += ((NI) 1); } LA15: ; } } { NIM_BOOL LOC32; TY535289 LOC36; LOC32 = (NIM_BOOL)0; LOC32 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 3))&7U)))!=0); if (!(LOC32)) goto LA33; LOC32 = !(hasdefault0); LA33: ; if (!LOC32) goto LA34; memset((void*)LOC36, 0, sizeof(LOC36)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0); } LA34: ; memset((void*)LOC37, 0, sizeof(LOC37)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0); } LA9: ; { if (!!((lend0 == NIM_NIL))) goto LA40; fixlabel_541230_839829468(p0, lend0); } LA40: ; } N_NIMCALL(void, gencase_549826_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) { Ttype294840* LOC8; genlinedir_534823_839829468(p0, t0); { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_299440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind294808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; LOC8 = (Ttype294840*)0; LOC8 = skiptypes_298099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440)); switch ((*LOC8).kind) { case ((Ttypekind294244) 28): { genstringcase_549416_839829468(p0, t0, d0); } break; case ((Ttypekind294244) 36) ... 
((Ttypekind294244) 39): { gencasegeneric_549087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601)); } break; default: { { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)); if (!(LOC14)) goto LA15; LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 30))&31U)))!=0); LA15: ; if (!LOC14) goto LA16; gengotoforcase_547673_839829468(p0, t0); } goto LA12; LA16: ; { genordinalcase_549724_839829468(p0, t0, d0); } LA12: ; } break; } } static N_INLINE(Tnode294802*, pop_320246_1689653243)(Tnodeseq294796** s0) { Tnode294802* result0; NI L0; result0 = (Tnode294802*)0; L0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1)); result0 = (*s0)->data[L0]; (*s0) = (Tnodeseq294796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode294802*), ((NI) (L0))); return result0; } N_NIMCALL(void, blockleaveactions_547442_839829468)(Tcproc531021* p0, NI howmanytrys0, NI howmanyexcepts0) { Tnodeseq294796* stack0; NI alreadypoppedcnt0; stack0 = (Tnodeseq294796*)0; stack0 = (Tnodeseq294796*) newSeq((&NTI294796), ((NI) 0)); alreadypoppedcnt0 = (*p0).inexceptblock; { NI i_547471_839829468; NI res_547596_839829468; i_547471_839829468 = (NI)0; res_547596_839829468 = ((NI) 1); { while (1) { Tnode294802* trystmt0; Tnode294802* finallystmt0; if (!(res_547596_839829468 <= howmanytrys0)) goto LA3; i_547471_839829468 = res_547596_839829468; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC6) goto LA7; LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA7: ; if (!!(LOC6)) goto LA8; { if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12; alreadypoppedcnt0 -= ((NI) 1); } goto LA10; LA12: ; { TY535289 LOC15; memset((void*)LOC15, 0, sizeof(LOC15)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0); } LA10: ; } LA8: ; trystmt0 = 
pop_320246_1689653243((&(*p0).nestedtrystmts)); stack0 = (Tnodeseq294796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode294802*)); asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0); ++stack0->Sup.len; finallystmt0 = lastson_297364_850551059(trystmt0); { if (!((*finallystmt0).kind == ((Tnodekind294020) 107))) goto LA18; genstmts_541244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]); } LA18: ; res_547596_839829468 += ((NI) 1); } LA3: ; } } { NI i_547546_839829468; NI HEX3Atmp_547601_839829468; NI res_547604_839829468; i_547546_839829468 = (NI)0; HEX3Atmp_547601_839829468 = (NI)0; HEX3Atmp_547601_839829468 = (NI)(howmanytrys0 - ((NI) 1)); res_547604_839829468 = HEX3Atmp_547601_839829468; { while (1) { if (!(((NI) 0) <= res_547604_839829468)) goto LA22; i_547546_839829468 = res_547604_839829468; (*p0).nestedtrystmts = (Tnodeseq294796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode294802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_547546_839829468]); ++(*p0).nestedtrystmts->Sup.len; res_547604_839829468 -= ((NI) 1); } LA22: ; } } { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC25) goto LA26; LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA26: ; if (!!(LOC25)) goto LA27; { NI i_547587_839829468; NI HEX3Atmp_547610_839829468; NI res_547613_839829468; i_547587_839829468 = (NI)0; HEX3Atmp_547610_839829468 = (NI)0; HEX3Atmp_547610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1)); res_547613_839829468 = HEX3Atmp_547610_839829468; { while (1) { TY535289 LOC32; if (!(((NI) 0) <= res_547613_839829468)) goto LA31; i_547587_839829468 = res_547613_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0); res_547613_839829468 -= ((NI) 1); } LA31: ; } } } LA27: ; } N_NIMCALL(void, 
genreturnstmt_547617_839829468)(Tcproc531021* p0, Tnode294802* t0) { TY535289 LOC14; { { if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag294427) 14))&15U)))!=0)) goto LA3; goto BeforeRet; } LA3: ; (*p0).beforeretneeded = NIM_TRUE; genlinedir_534823_839829468(p0, t0); { if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA7; genstmts_541244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]); } LA7: ; blockleaveactions_547442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock); { Ropeobj180006* safepoint0; TY180507 LOC13; if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11; safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))]; memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1); } LA11: ; memset((void*)LOC14, 0, sizeof(LOC14)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0); }BeforeRet: ; } N_NIMCALL(void, genbreakstmt_548444_839829468)(Tcproc531021* p0, Tnode294802* t0) { NI idx0; Ropeobj180006* label0; TY180507 LOC16; idx0 = (*p0).breakidx; { Tsym294834* sym0; if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA3; sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; idx0 = (NI)((*sym0).position - ((NI) 1)); } goto LA1; LA3: ; { { while (1) { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = (((NI) 0) <= idx0); if (!(LOC8)) goto LA9; LOC8 = !((*p0).blocks->data[idx0].isloop); LA9: ; if (!LOC8) goto LA7; idx0 -= ((NI) 1); } LA7: ; } { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = (idx0 < ((NI) 0)); if (LOC12) goto LA13; LOC12 = !((*p0).blocks->data[idx0].isloop); LA13: ; if (!LOC12) goto LA14; internalerror_198100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609)); } LA14: ; } LA1: 
; label0 = assignlabel_546020_839829468((&(*p0).blocks->data[idx0])); blockleaveactions_547442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts)))); genlinedir_534823_839829468(p0, t0); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = label0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1); } N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_551080_839829468)(Tcproc531021* p0, Tnode294802* asgn0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { Tnode294802* le0; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 2))&31U)))!=0)) goto LA3; le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)]; { Tsym294834* field0; if (!((*le0).kind == ((Tnodekind294020) 46))) goto LA7; field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag294184) 18))&31U)))!=0); } goto LA5; LA7: ; { Tsym294834* field0; if (!((*le0).kind == ((Tnodekind294020) 45))) goto LA10; field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag294184) 18))&31U)))!=0); } goto LA5; LA10: ; LA5: ; } LA3: ; return result0; } N_NIMCALL(Ropeobj180006*, discriminatortabledecl_538094_839829468)(Tcgen531027* m0, Ttype294840* objtype0, Tsym294834* d0) { Ropeobj180006* result0; Ropeobj180006* LOC1; Ropeobj180006* tmp0; TY534811 LOC2; NI64 LOC3; result0 = (Ropeobj180006*)0; LOC1 = (Ropeobj180006*)0; LOC1 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_130)); tmp0 = discriminatortablename_538057_839829468(m0, objtype0, d0); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = tmp0; LOC3 = (NI64)0; LOC3 = lengthord_322007_3876443242((*d0).typ); LOC2[1] = rope_180401_2381377266((NI64)(LOC3 + IL64(1))); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2); 
return result0; } N_NIMCALL(void, gendiscriminantcheck_551144_839829468)(Tcproc531021* p0, Tloc294816 a0, Tloc294816 tmp0, Ttype294840* objtype0, Tsym294834* field0) { Ttype294840* t0; Ropeobj180006* LOC1; NI64 L0; TY537235 LOC8; t0 = skiptypes_298099_850551059(objtype0, IL64(211106240964864)); LOC1 = (Ropeobj180006*)0; LOC1 = gentypeinfo_537941_839829468((*p0).module, t0); L0 = lengthord_322007_3876443242((*field0).typ); { NIM_BOOL LOC4; TY180507 LOC7; LOC4 = (NIM_BOOL)0; LOC4 = containsorincl_270862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id); if (!!(LOC4)) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = discriminatortabledecl_538094_839829468((*p0).module, t0, field0); appcg_534640_839829468((*p0).module, ((Tcfilesection531005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1); } LA5: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_540188_839829468(a0); LOC8[1] = rdloc_540188_839829468(tmp0); LOC8[2] = discriminatortablename_538057_839829468((*p0).module, t0, field0); LOC8[3] = intliteral_541270_839829468((NI64)(L0 + IL64(1))); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4); } N_NIMCALL(void, asgnfielddiscriminant_551209_839829468)(Tcproc531021* p0, Tnode294802* e0) { Tloc294816 a0; Tloc294816 tmp0; Tnode294802* dotexpr0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)]; { if (!((*dotexpr0).kind == ((Tnodekind294020) 46))) goto LA3; dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)]; } LA3: ; initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); gettemp_539032_839829468(p0, a0.t, (&tmp0), NIM_FALSE); expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); gendiscriminantcheck_551144_839829468(p0, a0, tmp0, (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym); genassignment_541264_839829468(p0, a0, 
tmp0, 0); } N_NIMCALL(void, genasgn_551239_839829468)(Tcproc531021* p0, Tnode294802* e0, NIM_BOOL fastasgn0) { genlinedir_534823_839829468(p0, e0); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)); if (!(LOC3)) goto LA4; LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 30))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; gengotovar_546258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]); } goto LA1; LA5: ; { NIM_BOOL LOC8; Tloc294816 a0; LOC8 = (NIM_BOOL)0; LOC8 = fielddiscriminantcheckneeded_551080_839829468(p0, e0); if (!!(LOC8)) goto LA9; memset((void*)(&a0), 0, sizeof(a0)); { Tnode294802* LOC13; Tnode294802* LOC16; LOC13 = (Tnode294802*)0; LOC13 = HEX5BHEX5D_295238_850551059(e0, ((NI) 0)); if (!((*LOC13).kind == ((Tnodekind294020) 47) || (*LOC13).kind == ((Tnodekind294020) 65))) goto LA14; LOC16 = (Tnode294802*)0; LOC16 = HEX5BHEX5D_295238_850551059(e0, ((NI) 0)); genderef_545921_839829468(p0, LOC16, (&a0), NIM_TRUE); } goto LA11; LA14: ; { initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA11: ; { if (!fastasgn0) goto LA20; a0.flags |= ((NU16)1)<<((((Tlocflag294810) 2))%(sizeof(NU16)*8)); } LA20: ; loadinto_545928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); } goto LA1; LA9: ; { asgnfielddiscriminant_551209_839829468(p0, e0); } LA1: ; } N_NIMCALL(Ropeobj180006*, genasmoremitstmt_550529_839829468)(Tcproc531021* p0, Tnode294802* t0, NIM_BOOL isasmstmt0) { Ropeobj180006* result0; NimStringDesc* res0; result0 = (Ropeobj180006*)0; res0 = copyString(((NimStringDesc*) &T839829468_490)); { NI i_550547_839829468; NI HEX3Atmp_550644_839829468; NI LOC2; NI res_550647_839829468; i_550547_839829468 = (NI)0; HEX3Atmp_550644_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(t0); HEX3Atmp_550644_839829468 = (NI)(LOC2 - ((NI) 1)); res_550647_839829468 = ((NI) 0); { while 
(1) { if (!(res_550647_839829468 <= HEX3Atmp_550644_839829468)) goto LA4; i_550547_839829468 = res_550647_839829468; switch ((*(*t0).kindU.S6.sons->data[i_550547_839829468]).kind) { case ((Tnodekind294020) 20) ... ((Tnodekind294020) 22): { res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_550547_839829468]).kindU.S3.strval->Sup.len + 0); appendString(res0, (*(*t0).kindU.S6.sons->data[i_550547_839829468]).kindU.S3.strval); } break; case ((Tnodekind294020) 3): { Tsym294834* sym0; sym0 = (*(*t0).kindU.S6.sons->data[i_550547_839829468]).kindU.S4.sym; { Tloc294816 a0; Ropeobj180006* LOC11; NimStringDesc* LOC12; if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[i_550547_839829468], (&a0)); LOC11 = (Ropeobj180006*)0; LOC11 = rdloc_540188_839829468(a0); LOC12 = (NimStringDesc*)0; LOC12 = HEX24_180856_2381377266(LOC11); res0 = resizeString(res0, LOC12->Sup.len + 0); appendString(res0, LOC12); } goto LA7; LA9: ; { Ropeobj180006* LOC16; NimStringDesc* LOC17; if (!((*sym0).kind == ((Tsymkind294435) 7))) goto LA14; LOC16 = (Ropeobj180006*)0; LOC16 = gettypedesc_537671_839829468((*p0).module, (*sym0).typ); LOC17 = (NimStringDesc*)0; LOC17 = HEX24_180856_2381377266(LOC16); res0 = resizeString(res0, LOC17->Sup.len + 0); appendString(res0, LOC17); } goto LA7; LA14: ; { Ropeobj180006* r0; NimStringDesc* LOC23; r0 = (*sym0).loc.r; { if (!(r0 == NIM_NIL)) goto LA21; r0 = manglename_535205_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), r0); } LA21: ; LOC23 = (NimStringDesc*)0; LOC23 = HEX24_180856_2381377266(r0); res0 = resizeString(res0, LOC23->Sup.len + 0); appendString(res0, LOC23); } LA7: ; } break; default: { internalerror_198100_155036129((*(*t0).kindU.S6.sons->data[i_550547_839829468]).info, ((NimStringDesc*) &T839829468_612)); } break; } res_550647_839829468 += ((NI) 1); } LA4: ; } } { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = isasmstmt0; if (!(LOC27)) 
goto LA28; LOC27 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 5))&7U)))!=0); LA28: ; if (!LOC27) goto LA29; { NimStringDesc* x_550604_839829468; NI first_550656_839829468; NI last_550658_839829468; x_550604_839829468 = (NimStringDesc*)0; first_550656_839829468 = ((NI) 0); last_550658_839829468 = ((NI) 0); { while (1) { NI j0; { while (1) { if (!!((((NU8)(res0->data[last_550658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_550658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_550658_839829468])) == ((NU8)(10))))) goto LA35; last_550658_839829468 += ((NI) 1); } LA35: ; } x_550604_839829468 = copyStrLast(res0, first_550656_839829468, (NI)(last_550658_839829468 - ((NI) 1))); j0 = ((NI) 0); { while (1) { if (!(((NU8)(x_550604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_550604_839829468->data[j0])) == ((NU8)(9)))) goto LA37; j0 += ((NI) 1); } LA37: ; } { if (!(((NU8)(x_550604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_550604_839829468->data[j0])) == ((NU8)(58)))) goto LA40; add_180487_2381377266(&result0, x_550604_839829468); add_180487_2381377266(&result0, tnl_178644_4151366050); } goto LA38; LA40: ; { if (!!(((NU8)(x_550604_839829468->data[j0]) == (NU8)(0)))) goto LA43; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_613)); add_180487_2381377266(&result0, x_550604_839829468); add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_614)); } goto LA38; LA43: ; LA38: ; { if (!((NU8)(res0->data[last_550658_839829468]) == (NU8)(10))) goto LA47; last_550658_839829468 += ((NI) 1); } goto LA45; LA47: ; { if (!((NU8)(res0->data[last_550658_839829468]) == (NU8)(13))) goto LA50; last_550658_839829468 += ((NI) 1); { if (!((NU8)(res0->data[last_550658_839829468]) == (NU8)(10))) goto LA54; last_550658_839829468 += ((NI) 1); } LA54: ; } goto LA45; LA50: ; { goto LA32; } LA45: ; first_550656_839829468 = last_550658_839829468; } } LA32: ; } } goto LA25; LA29: ; { res0 = 
resizeString(res0, tnl_178644_4151366050->Sup.len + 0); appendString(res0, tnl_178644_4151366050); result0 = rope_180277_2381377266(res0); } LA25: ; return result0; } N_NIMCALL(void, genasmstmt_550659_839829468)(Tcproc531021* p0, Tnode294802* t0) { Ropeobj180006* s0; genlinedir_534823_839829468(p0, t0); s0 = genasmoremitstmt_550529_839829468(p0, t0, NIM_TRUE); { TY180507 LOC5; if (!((*p0).prc == NIM_NIL)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = s0; addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 7))- 0], Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field17, LOC5, 1); } goto LA1; LA3: ; { TY180507 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = s0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field17, LOC7, 1); } LA1: ; } static N_INLINE(void, gensimpleblock_546095_839829468)(Tcproc531021* p0, Tnode294802* stmts0) { TY535289 LOC1; NI LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0); genstmts_541244_839829468(p0, stmts0); endblock_546060_839829468(p0); } N_NIMCALL(void, gentrycpp_549865_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) { Ropeobj180006* exc0; TY535289 LOC16; NI LOC17; NI length0; TY180507 LOC18; Ropeobj180006* LOC19; NI i0; NIM_BOOL catchallpresent0; TY535289 LOC78; Tnode294802* LOC79; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_299440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind294808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; genlinedir_534823_839829468(p0, t0); exc0 = gettempname_535596_839829468((*p0).module); { Tsym294834* LOC10; Ropeobj180006* LOC13; LOC10 = (Tsym294834*)0; LOC10 = getcompilerproc_340746_3937434831(((NimStringDesc*) &T839829468_615)); if (!!((LOC10 == NIM_NIL))) 
goto LA11; LOC13 = (Ropeobj180006*)0; LOC13 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615)); } goto LA8; LA11: ; { Ropeobj180006* LOC15; LOC15 = (Ropeobj180006*)0; LOC15 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616)); } LA8: ; (*p0).nestedtrystmts = (Tnodeseq294796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode294802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0); ++(*p0).nestedtrystmts->Sup.len; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = (NI)0; LOC17 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0); expr_541248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0); length0 = sonslen_297351_850551059(t0); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = exc0; LOC19 = (Ropeobj180006*)0; LOC19 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1); endblock_546035_839829468(p0, LOC19); { TY535289 LOC24; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0)) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0); } LA22: ; (*p0).inexceptblock += ((NI) 1); i0 = ((NI) 1); catchallpresent0 = NIM_FALSE; { while (1) { NIM_BOOL LOC27; NI blen0; LOC27 = (NIM_BOOL)0; LOC27 = (i0 < length0); if (!(LOC27)) goto LA28; LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 87)); LA28: ; if (!LOC27) goto LA26; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = ((*d0).k == ((Tlockind294808) 1)); if (!(LOC31)) goto LA32; LOC31 = isemptytype_299440_850551059((*t0).typ); LA32: ; if (!LOC31) goto LA33; (*d0).k = ((Tlockind294808) 0); } LA33: ; blen0 = sonslen_297351_850551059((*t0).kindU.S6.sons->data[i0]); { Ropeobj180006** LOC39; TY535289 LOC40; if (!(((NI) 1) < i0)) goto LA37; LOC39 = (Ropeobj180006**)0; LOC39 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); memset((void*)LOC40, 0, 
sizeof(LOC40)); addf_181205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0); } LA37: ; { TY535289 LOC45; NI LOC46; TY535289 LOC47; if (!(blen0 == ((NI) 1))) goto LA43; catchallpresent0 = NIM_TRUE; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (NI)0; LOC46 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0); expr_541248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC47, 0, sizeof(LOC47)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0); endblock_546060_839829468(p0); } goto LA41; LA43: ; { Ropeobj180006* orexpr0; TY180507 LOC57; TY535289 LOC58; NI LOC59; TY535289 LOC60; orexpr0 = NIM_NIL; { NI j_549978_839829468; NI HEX3Atmp_550101_839829468; NI res_550104_839829468; j_549978_839829468 = (NI)0; HEX3Atmp_550101_839829468 = (NI)0; HEX3Atmp_550101_839829468 = (NI)(blen0 - ((NI) 2)); res_550104_839829468 = ((NI) 0); { while (1) { TY534811 LOC56; if (!(res_550104_839829468 <= HEX3Atmp_550101_839829468)) goto LA51; j_549978_839829468 = res_550104_839829468; { if (!!((orexpr0 == NIM_NIL))) goto LA54; add_180487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229)); } LA54: ; memset((void*)LOC56, 0, sizeof(LOC56)); LOC56[0] = exc0; LOC56[1] = gentypeinfo_537941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_549978_839829468]).typ); appcg_534632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2); res_550104_839829468 += ((NI) 1); } LA51: ; } } memset((void*)LOC57, 0, sizeof(LOC57)); LOC57[0] = orexpr0; linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1); memset((void*)LOC58, 0, sizeof(LOC58)); LOC59 = (NI)0; LOC59 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0); expr_541248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0); 
memset((void*)LOC60, 0, sizeof(LOC60)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0); endblock_546060_839829468(p0); } LA41: ; i0 += ((NI) 1); } LA26: ; } { TY535289 LOC70; NI LOC71; Tnode294802* finallyblock0; TY535289 LOC76; Ropeobj180006* LOC77; if (!!(catchallpresent0)) goto LA63; { TY535289 LOC69; if (!(((NI) 1) < i0)) goto LA67; memset((void*)LOC69, 0, sizeof(LOC69)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0); } LA67: ; memset((void*)LOC70, 0, sizeof(LOC70)); LOC71 = (NI)0; LOC71 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0); finallyblock0 = lastson_297364_850551059(t0); { if (!((*finallyblock0).kind == ((Tnodekind294020) 107))) goto LA74; genstmts_541244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]); } LA74: ; memset((void*)LOC76, 0, sizeof(LOC76)); LOC77 = (Ropeobj180006*)0; LOC77 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0); line_534690_839829468(p0, ((Tcprocsection531011) 2), LOC77); endblock_546060_839829468(p0); } LA63: ; memset((void*)LOC78, 0, sizeof(LOC78)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0); (*p0).inexceptblock -= ((NI) 1); LOC79 = (Tnode294802*)0; LOC79 = pop_320246_1689653243((&(*p0).nestedtrystmts)); { NIM_BOOL LOC82; LOC82 = (NIM_BOOL)0; LOC82 = (i0 < length0); if (!(LOC82)) goto LA83; LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 107)); LA83: ; if (!LOC82) goto LA84; gensimpleblock_546095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]); } LA84: ; } N_NIMCALL(void, line_534695_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* r0) { Ropeobj180006** LOC1; Ropeobj180006* LOC2; Ropeobj180006* LOC3; LOC1 = (Ropeobj180006**)0; LOC1 = s_531179_3723162438(p0, s0); LOC2 = (Ropeobj180006*)0; LOC2 = rope_180277_2381377266(r0); LOC3 = 
(Ropeobj180006*)0; LOC3 = indentline_534656_839829468(p0, LOC2); add_180482_2381377266(LOC1, LOC3); } static N_INLINE(Ropeobj180006*, pop_180530_1689653243)(TY193350** s0) { Ropeobj180006* result0; NI L0; result0 = (Ropeobj180006*)0; L0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1)); result0 = (*s0)->data[L0]; (*s0) = (TY193350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj180006*), ((NI) (L0))); return result0; } N_NIMCALL(void, gentry_550114_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) { NIM_BOOL LOC8; Ropeobj180006* safepoint0; TY180507 LOC17; TY180507 LOC18; TY180507 LOC37; NI LOC38; NI length0; TY535289 LOC39; TY535289 LOC40; NI LOC41; TY535289 LOC42; NI i0; Tnode294802* LOC95; TY180507 LOC103; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_299440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind294808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; LOC8 = (NIM_BOOL)0; LOC8 = includestr_148249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624)); genlinedir_534823_839829468(p0, t0); safepoint0 = gettempname_535596_839829468((*p0).module); { Tsym294834* LOC11; Ropeobj180006* LOC14; LOC11 = (Tsym294834*)0; LOC11 = getcompilerproc_340746_3937434831(((NimStringDesc*) &T839829468_615)); if (!!((LOC11 == NIM_NIL))) goto LA12; LOC14 = (Ropeobj180006*)0; LOC14 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615)); } goto LA9; LA12: ; { Ropeobj180006* LOC16; LOC16 = (Ropeobj180006*)0; LOC16 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616)); } LA9: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) 
&T839829468_626), LOC18, 1); { NIM_BOOL LOC21; TY180507 LOC24; LOC21 = (NIM_BOOL)0; LOC21 = isdefined_202011_1967573533(((NimStringDesc*) &T839829468_627)); if (!LOC21) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1); } goto LA19; LA22: ; { NIM_BOOL LOC26; TY180507 LOC29; LOC26 = (NIM_BOOL)0; LOC26 = isdefined_202011_1967573533(((NimStringDesc*) &T839829468_629)); if (!LOC26) goto LA27; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1); } goto LA19; LA27: ; { NIM_BOOL LOC31; TY180507 LOC34; LOC31 = (NIM_BOOL)0; LOC31 = isdefined_202011_1967573533(((NimStringDesc*) &T839829468_631)); if (!LOC31) goto LA32; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1); } goto LA19; LA32: ; { TY180507 LOC36; memset((void*)LOC36, 0, sizeof(LOC36)); LOC36[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1); } LA19: ; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = safepoint0; LOC38 = (NI)0; LOC38 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1); length0 = sonslen_297351_850551059(t0); (*p0).nestedtrystmts = (Tnodeseq294796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode294802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0); ++(*p0).nestedtrystmts->Sup.len; expr_541248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC39, 0, sizeof(LOC39)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0); endblock_546060_839829468(p0); memset((void*)LOC40, 0, sizeof(LOC40)); LOC41 = (NI)0; LOC41 = 
startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0); memset((void*)LOC42, 0, sizeof(LOC42)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0); { TY535289 LOC47; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0)) goto LA45; memset((void*)LOC47, 0, sizeof(LOC47)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0); } LA45: ; (*p0).inexceptblock += ((NI) 1); i0 = ((NI) 1); { while (1) { NIM_BOOL LOC50; NI blen0; LOC50 = (NIM_BOOL)0; LOC50 = (i0 < length0); if (!(LOC50)) goto LA51; LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 87)); LA51: ; if (!LOC50) goto LA49; { NIM_BOOL LOC54; LOC54 = (NIM_BOOL)0; LOC54 = ((*d0).k == ((Tlockind294808) 1)); if (!(LOC54)) goto LA55; LOC54 = isemptytype_299440_850551059((*t0).typ); LA55: ; if (!LOC54) goto LA56; (*d0).k = ((Tlockind294808) 0); } LA56: ; blen0 = sonslen_297351_850551059((*t0).kindU.S6.sons->data[i0]); { TY535289 LOC67; NI LOC68; TY180507 LOC69; TY535289 LOC70; if (!(blen0 == ((NI) 1))) goto LA60; { TY535289 LOC66; if (!(((NI) 1) < i0)) goto LA64; memset((void*)LOC66, 0, sizeof(LOC66)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0); } LA64: ; memset((void*)LOC67, 0, sizeof(LOC67)); LOC68 = (NI)0; LOC68 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0); memset((void*)LOC69, 0, sizeof(LOC69)); LOC69[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1); expr_541248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC70, 0, sizeof(LOC70)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0); endblock_546060_839829468(p0); } goto LA58; LA60: ; { Ropeobj180006* orexpr0; TY180507 LOC91; NI LOC92; TY180507 LOC93; 
TY535289 LOC94; orexpr0 = NIM_NIL; { NI j_550247_839829468; NI HEX3Atmp_550521_839829468; NI res_550524_839829468; j_550247_839829468 = (NI)0; HEX3Atmp_550521_839829468 = (NI)0; HEX3Atmp_550521_839829468 = (NI)(blen0 - ((NI) 2)); res_550524_839829468 = ((NI) 0); { while (1) { NimStringDesc* isobjformat0; TY180507 LOC86; if (!(res_550524_839829468 <= HEX3Atmp_550521_839829468)) goto LA74; j_550247_839829468 = res_550524_839829468; { if (!!((orexpr0 == NIM_NIL))) goto LA77; add_180487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229)); } LA77: ; { NIM_BOOL LOC81; LOC81 = (NIM_BOOL)0; LOC81 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC81) goto LA82; LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA82: ; if (!!(LOC81)) goto LA83; isobjformat0 = copyString(((NimStringDesc*) &T839829468_637)); } goto LA79; LA83: ; { isobjformat0 = copyString(((NimStringDesc*) &T839829468_638)); } LA79: ; memset((void*)LOC86, 0, sizeof(LOC86)); LOC86[0] = gentypeinfo_537941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_550247_839829468]).typ); appcg_534632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1); res_550524_839829468 += ((NI) 1); } LA74: ; } } { if (!(((NI) 1) < i0)) goto LA89; line_534695_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_620)); } LA89: ; memset((void*)LOC91, 0, sizeof(LOC91)); LOC91[0] = orexpr0; LOC92 = (NI)0; LOC92 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1); memset((void*)LOC93, 0, sizeof(LOC93)); LOC93[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1); expr_541248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0); memset((void*)LOC94, 0, sizeof(LOC94)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0); endblock_546060_839829468(p0); 
} LA58: ; i0 += ((NI) 1); } LA49: ; } (*p0).inexceptblock -= ((NI) 1); LOC95 = (Tnode294802*)0; LOC95 = pop_320246_1689653243((&(*p0).nestedtrystmts)); endblock_546060_839829468(p0); { NIM_BOOL LOC98; Ropeobj180006* LOC102; LOC98 = (NIM_BOOL)0; LOC98 = (i0 < length0); if (!(LOC98)) goto LA99; LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 107)); LA99: ; if (!LOC98) goto LA100; (*p0).finallysafepoints = (TY193350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj180006*)); asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0); ++(*p0).finallysafepoints->Sup.len; gensimpleblock_546095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]); LOC102 = (Ropeobj180006*)0; LOC102 = pop_180530_1689653243((&(*p0).finallysafepoints)); } LA100: ; memset((void*)LOC103, 0, sizeof(LOC103)); LOC103[0] = safepoint0; linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1); } N_NIMCALL(NimStringDesc*, getraisefrmt_548824_839829468)(Tcproc531021* p0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; result0 = copyString(((NimStringDesc*) &T839829468_641)); return result0; } N_NIMCALL(void, genraisestmt_548828_839829468)(Tcproc531021* p0, Tnode294802* t0) { { Tnode294802* finallyblock0; if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3; finallyblock0 = lastson_297364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? 
(*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]); { if (!((*finallyblock0).kind == ((Tnodekind294020) 107))) goto LA7; gensimpleblock_546095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]); } LA7: ; } LA3: ; { Tloc294816 a0; Ropeobj180006* e0; Ttype294840* typ0; NimStringDesc* LOC13; TY534811 LOC14; if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA11; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); e0 = rdloc_540188_839829468(a0); typ0 = skiptypes_298099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320)); genlinedir_534823_839829468(p0, t0); LOC13 = (NimStringDesc*)0; LOC13 = getraisefrmt_548824_839829468(p0); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = e0; LOC14[1] = makecstring_193638_155036129((*(*(*typ0).sym).name).s); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), LOC13, LOC14, 2); } goto LA9; LA11: ; { genlinedir_534823_839829468(p0, t0); { NIM_BOOL LOC18; NIM_BOOL LOC19; TY535289 LOC24; Ropeobj180006* LOC25; LOC18 = (NIM_BOOL)0; LOC19 = (NIM_BOOL)0; LOC19 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC19) goto LA20; LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA20: ; LOC18 = LOC19; if (!(LOC18)) goto LA21; LOC18 = !(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 31))&63U)))!=0)); LA21: ; if (!LOC18) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); LOC25 = (Ropeobj180006*)0; LOC25 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0); line_534690_839829468(p0, ((Tcprocsection531011) 2), LOC25); } goto LA16; LA22: ; { TY535289 LOC27; memset((void*)LOC27, 0, sizeof(LOC27)); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0); } LA16: ; } LA9: ; } N_NIMCALL(void, gentypesection_540184_839829468)(Tcgen531027* m0, Tnode294802* n0) { } 
/* NOTE(review): This region is machine-generated C (it carries the Nim
 * compiler's C-backend fingerprints: N_NIMCALL, NIM_BOOL, mangled names,
 * numbered LAnn goto labels).  Do not hand-edit the logic — presumably it is
 * regenerated from Nim sources; fix the generator input instead.  Comments
 * below describe only what the emitted C visibly does. */

/* Maps an emit/asm pragma node to a target C-file section (Tcfilesection).
 * Default result is ordinal 7; if the node has >= 1 son and son 0 is a
 * string-literal kind (ordinals 20..22), the string's prefix selects the
 * section: prefix T..._643 -> section 3, T..._644 -> section 9,
 * T..._645 -> section 1 (exact prefix strings live in rodata not visible
 * here — TODO confirm against the generating Nim source). */
N_NIMCALL(Tcfilesection531005, determinesection_550819_839829468)(Tnode294802* n0) { Tcfilesection531005 result0; result0 = (Tcfilesection531005)0; result0 = ((Tcfilesection531005) 7); { NIM_BOOL LOC3; NI LOC4; NimStringDesc* sec0; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = len_295081_850551059(n0); LOC3 = (((NI) 1) <= LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind >= ((Tnodekind294020) 20) && (*(*n0).kindU.S6.sons->data[((NI) 0)]).kind <= ((Tnodekind294020) 22)); LA5: ; if (!LOC3) goto LA6; sec0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S3.strval; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643)); if (!LOC10) goto LA11; result0 = ((Tcfilesection531005) 3); } goto LA8; LA11: ; { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644)); if (!LOC14) goto LA15; result0 = ((Tcfilesection531005) 9); } goto LA8; LA15: ; { NIM_BOOL LOC18; LOC18 = (NIM_BOOL)0; LOC18 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645)); if (!LOC18) goto LA19; result0 = ((Tcfilesection531005) 1); } goto LA8; LA19: ; LA8: ; } LA6: ; return result0; }

/* Emits the text of an {.emit.} pragma (son 1 of t0).  At top level
 * (p0->prc == NIM_NIL) the rope is appended into the module section chosen
 * by determinesection_550819 (with a C #line directive first); inside a proc
 * it is written into the proc's statement section (Tcprocsection ordinal 2)
 * after a line directive. */
N_NIMCALL(void, genemit_550839_839829468)(Tcproc531021* p0, Tnode294802* t0) { Ropeobj180006* s0; s0 = genasmoremitstmt_550529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE); { Tcfilesection531005 section0; Tnode294802* LOC5; if (!((*p0).prc == NIM_NIL)) goto LA3; LOC5 = (Tnode294802*)0; LOC5 = HEX5BHEX5D_295238_850551059(t0, ((NI) 1)); section0 = determinesection_550819_839829468(LOC5); genclinedir_534813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info); add_180482_2381377266(&(*(*p0).module).s[(section0)- 0], s0); } goto LA1; LA3: ; { genlinedir_534823_839829468(p0, t0); line_534690_839829468(p0, ((Tcprocsection531011) 2), s0); } LA1: ; }

/* Registers a breakpoint pragma: only active when proc option bit 17 is set.
 * The breakpoint name comes from son 1 if the node kind is ordinal 34,
 * otherwise an auto-generated name built from a module-level counter
 * (breakpointid_550860).  Appends a formatted entry — line number, file
 * name, breakpoint name — to the global gbreakpoints rope. */
N_NIMCALL(void, genbreakpoint_550862_839829468)(Tcproc531021* p0, Tnode294802* t0) { NimStringDesc* name0; name0 = (NimStringDesc*)0; { TY537238 LOC12; NI LOC13; NimStringDesc* LOC14; if (!(((*p0).options &(1U<<((NU)(((Toption171009) 17))&31U)))!=0)) goto LA3; { if (!((*t0).kind == ((Tnodekind294020) 34))) goto LA7; name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval); } goto LA5; LA7: ; { NimStringDesc* LOC10; NimStringDesc* LOC11; breakpointid_550860_839829468 += ((NI) 1); LOC10 = (NimStringDesc*)0; LOC11 = (NimStringDesc*)0; LOC11 = nimIntToStr(breakpointid_550860_839829468); LOC10 = rawNewString(LOC11->Sup.len + 2); appendString(LOC10, ((NimStringDesc*) &T839829468_646)); appendString(LOC10, LOC11); name0 = LOC10; } LA5: ; genlinedir_534823_839829468(p0, t0); memset((void*)LOC12, 0, sizeof(LOC12)); LOC13 = (NI)0; LOC13 = tolinenumber_194415_155036129((*t0).info); LOC12[0] = rope_180401_2381377266(((NI64) (LOC13))); LOC14 = (NimStringDesc*)0; LOC14 = tofilename_194260_155036129((*t0).info.fileindex); LOC12[1] = makecstring_193638_155036129(LOC14); LOC12[2] = makecstring_193638_155036129(name0); appcg_534632_839829468((*p0).module, &gbreakpoints_550861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3); } LA3: ; }

/* Emits a watchpoint registration for the expression in son 1: returns
 * early unless proc option bit 17 (same debug option as genbreakpoint) is
 * set; otherwise emits a call passing the expression's address, its
 * rendered source text, and its runtime type info. */
N_NIMCALL(void, genwatchpoint_551016_839829468)(Tcproc531021* p0, Tnode294802* n0) { Tloc294816 a0; Ttype294840* typ0; TY537238 LOC5; NimStringDesc* LOC6; { { if (!!((((*p0).options &(1U<<((NU)(((Toption171009) 17))&31U)))!=0))) goto LA3; goto BeforeRet; } LA3: ; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); typ0 = skiptypes_298099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = addrloc_540204_839829468(a0); LOC6 = (NimStringDesc*)0; LOC6 = rendertree_313044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0); LOC5[1] = makecstring_193638_155036129(LOC6); LOC5[2] = gentypeinfo_537941_839829468((*p0).module, typ0); linecg_534707_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_648), LOC5, 3); }BeforeRet: ; }

/* Walks every son of a pragma statement and dispatches on the pragma word
 * (whichpragma): ordinal 191 -> genemit, 131 -> genbreakpoint,
 * 176 -> genwatchpoint, 183 -> generates the pragma body in a fresh proc
 * context (with two option bits masked off via & ~98304) and stores its
 * statement rope into the module's injectstmt slot.  All other pragma words
 * are deliberately ignored here. */
N_NIMCALL(void, genpragma_551039_839829468)(Tcproc531021* p_551041_839829468, Tnode294802* n0) { { NI i_551054_839829468; NI HEX3Atmp_551073_839829468; NI LOC2; NI res_551076_839829468; i_551054_839829468 = (NI)0; HEX3Atmp_551073_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_297351_850551059(n0); HEX3Atmp_551073_839829468 = (NI)(LOC2 - ((NI) 1)); res_551076_839829468 = ((NI) 0); { while (1) { Tnode294802* it0; Tspecialword277003 LOC5; if (!(res_551076_839829468 <= HEX3Atmp_551073_839829468)) goto LA4; i_551054_839829468 = res_551076_839829468; it0 = (*n0).kindU.S6.sons->data[i_551054_839829468]; LOC5 = (Tspecialword277003)0; LOC5 = whichpragma_320911_2616423590(it0); switch (LOC5) { case ((Tspecialword277003) 191): { genemit_550839_839829468(p_551041_839829468, it0); } break; case ((Tspecialword277003) 131): { genbreakpoint_550862_839829468(p_551041_839829468, it0); } break; case ((Tspecialword277003) 176): { genwatchpoint_551016_839829468(p_551041_839829468, it0); } break; case ((Tspecialword277003) 183): { Tcproc531021* p0; Ropeobj180006** LOC10; p0 = newproc_531206_3723162438(NIM_NIL, (*p_551041_839829468).module); (*p0).options = ((*p0).options & ~ 98304); genstmts_541244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]); LOC10 = (Ropeobj180006**)0; LOC10 = s_531179_3723162438(p0, ((Tcprocsection531011) 2)); asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10)); } break; default: { } break; } res_551076_839829468 += ((NI) 1); } LA4: ; } } }

/* Code for a parallel-for statement: declares the loop variable, evaluates
 * the range bounds from the call in son 1 (its sons 1 and 2), emits a loop
 * header via a format template (T..._649) with loop var, both bounds and
 * the string from call son 3, then generates the body (son 2) inside a new
 * breakable block.  withinloop and breakidx on the proc context are
 * saved/restored around the loop. */
N_NIMCALL(void, genparforstmt_548208_839829468)(Tcproc531021* p0, Tnode294802* t0) { NI oldbreakidx_548411_839829468; Tsym294834* forloopvar0; Tloc294816 rangea0; Tloc294816 rangeb0; Tnode294802* call0; TY537235 LOC1; NimStringDesc* LOC2; TY535289 LOC3; (*p0).withinloop += ((NI) 1); genlinedir_534823_839829468(p0, t0); oldbreakidx_548411_839829468 = (*p0).breakidx; forloopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; memset((void*)(&rangea0), 0, sizeof(rangea0)); memset((void*)(&rangeb0), 0, sizeof(rangeb0)); assignlocalvar_540614_839829468(p0, forloopvar0); call0 = (*t0).kindU.S6.sons->data[((NI) 1)]; initlocexpr_541283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 1)], (&rangea0)); initlocexpr_541283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 2)], (&rangeb0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468((*forloopvar0).loc); LOC1[1] = rdloc_540188_839829468(rangea0); LOC1[2] = rdloc_540188_839829468(rangeb0); LOC2 = (NimStringDesc*)0; LOC2 = getstr_299230_850551059((*call0).kindU.S6.sons->data[((NI) 3)]); LOC1[3] = rope_180277_2381377266(LOC2); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_649), LOC1, 4); memset((void*)LOC3, 0, sizeof(LOC3)); (*p0).breakidx = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC3, 0); (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE; genstmts_541244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]); endblock_546060_839829468(p0); (*p0).breakidx = oldbreakidx_548411_839829468; (*p0).withinloop -= ((NI) 1); }

/* Emits a state label for closure-iterator state machines: requires the
 * node to have exactly one son of int-literal kind (ordinal 6), raising an
 * internal error otherwise, then formats the literal's value through
 * template T..._652 into the statement section. */
N_NIMCALL(void, genstate_546117_839829468)(Tcproc531021* p0, Tnode294802* n0) { NI64 idx0; TY180507 LOC9; { NIM_BOOL LOC3; NI LOC4; NimStringDesc* LOC8; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = len_295081_850551059(n0); LOC3 = (LOC4 == ((NI) 1)); if (!(LOC3)) goto LA5; LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 6)); LA5: ; if (!!(LOC3)) goto LA6; LOC8 = (NimStringDesc*)0; LOC8 = HEX24_198185_1689653243(T839829468_650); internalerror_198113_155036129(LOC8); } LA6: ; idx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = rope_180401_2381377266(idx0); linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_652), LOC9, 1); }

/* Emits a computed goto to a state label: evaluates the target state
 * expression (son 0), marks beforeretneeded on the proc context, then emits
 * one case/jump line per state from 0 up to lastord of the expression's
 * type (templates T..._653/_654/_160 frame the switch — exact C text lives
 * in rodata not visible here). */
N_NIMCALL(void, gengotostate_546144_839829468)(Tcproc531021* p0, Tnode294802* n0) { Tloc294816 a0; TY180507 LOC1; TY535289 LOC2; TY535289 LOC7; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_540188_839829468(a0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_603), LOC1, 1); (*p0).beforeretneeded = NIM_TRUE; memset((void*)LOC2, 0, sizeof(LOC2)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_653), LOC2, 0); { NI64 i_546214_839829468; NI64 HEX3Atmp_546223_839829468; NI64 res_546226_839829468; i_546214_839829468 = (NI64)0; HEX3Atmp_546223_839829468 = (NI64)0; HEX3Atmp_546223_839829468 = lastord_322004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ); res_546226_839829468 = IL64(0); { while (1) { TY180507 LOC6; if (!(res_546226_839829468 <= HEX3Atmp_546223_839829468)) goto LA5; i_546214_839829468 = res_546226_839829468; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_180401_2381377266(i_546214_839829468); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_654), LOC6, 1); res_546226_839829468 += ((NI) 1); } LA5: ; } } memset((void*)LOC7, 0, sizeof(LOC7)); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC7, 0); }

/* Emits the break-out-of-state-machine statement.  If son 0 is a node of
 * kind ordinal 155 (closure), the env is taken from that node's son 1 and
 * formatted through template T..._655; otherwise son 0 itself is evaluated
 * and formatted through T..._656. */
N_NIMCALL(void, genbreakstate_546229_839829468)(Tcproc531021* p0, Tnode294802* n0) { Tloc294816 a0; memset((void*)(&a0), 0, sizeof(a0)); { TY180507 LOC5; if (!((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 155))) goto LA3; initlocexpr_541283_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_540188_839829468(a0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_655), LOC5, 1); } goto LA1; LA3: ; { TY180507 LOC7; initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_540188_839829468(a0); linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_656), LOC7, 1); } LA1: ; }

/* Central AST dispatcher of this code generator: one giant switch on the
 * node kind (Tnodekind ordinal) that routes every expression/statement node
 * to its gen* routine — symbols (nested switch on symbol kind with
 * internal-error checks for missing locs), literals, calls (magic vs.
 * normal, with and without a result type), set/array/tuple/object
 * constructors (constant-folded via exprcomplexconst when isdeepconstexpr),
 * casts and conversions, control flow (if/case/while/block/try/return/
 * break/raise), assignments (skipped when node flag bit 14 is set),
 * pragmas, lambdas/do-blocks (deferred proc generation guarded by a chain
 * of flag tests), par-for and state-machine nodes.  Falls through to an
 * internal error naming the unexpected node kind.  Recursion: calls itself
 * for stmt-list-expr sons and for ordinal-91/93 wrapper nodes. */
N_NIMCALL(void, expr_541248_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) { switch ((*n0).kind) { case ((Tnodekind294020) 3): { Tsym294834* sym0; sym0 = (*n0).kindU.S4.sym; switch ((*sym0).kind) { case ((Tsymkind294435) 13): { { if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5; fillprocloc_541201_839829468(sym0); genprocprototype_541254_839829468((*p0).module, sym0); } goto LA3; LA5: ; { genproc_534951_839829468((*p0).module, sym0); } LA3: ; putlocintodest_541258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind294435) 12): case ((Tsymkind294435) 15): case ((Tsymkind294435) 14): { { NimStringDesc* LOC13; if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 23))&31U)))!=0)) goto LA11; LOC13 = (NimStringDesc*)0; LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48); appendString(LOC13, ((NimStringDesc*) &T839829468_270)); appendString(LOC13, (*(*sym0).name).s); localerror_198085_155036129((*n0).info, LOC13); } LA11: ; genproc_534951_839829468((*p0).module, sym0); { NIM_BOOL LOC16; NimStringDesc* LOC20; LOC16 = (NIM_BOOL)0; LOC16 = ((*sym0).loc.r == NIM_NIL); if (LOC16) goto LA17; LOC16 = ((*sym0).loc.t == NIM_NIL); LA17: ; if (!LOC16) goto LA18; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20); appendString(LOC20, ((NimStringDesc*) &T839829468_271)); appendString(LOC20, (*(*sym0).name).s); internalerror_198100_155036129((*n0).info, LOC20); } LA18: ; putlocintodest_541258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind294435) 10): { { NIM_BOOL LOC24; Ropeobj180006* LOC27; LOC24 = (NIM_BOOL)0; LOC24 = issimpleconst_534311_839829468((*sym0).typ); if (!LOC24) goto LA25; LOC27 = (Ropeobj180006*)0; LOC27 = genliteral_551476_839829468(p0, (*sym0).ast, (*sym0).typ); putintodest_552468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc294812) 1)); } goto LA22; LA25: ; { gencomplexconst_560249_839829468(p0, sym0, d0); } LA22: ; } break; case ((Tsymkind294435) 19): { Ropeobj180006* LOC30; LOC30 = (Ropeobj180006*)0; LOC30 = rope_180401_2381377266(((NI64) ((*sym0).position))); putintodest_552468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc294812) 0)); } break; case ((Tsymkind294435) 8): case ((Tsymkind294435) 20): case ((Tsymkind294435) 11): case ((Tsymkind294435) 9): { { if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34; genvarprototype_541236_839829468((*p0).module, sym0); } LA34: ; { NIM_BOOL LOC38; NimStringDesc* LOC42; NimStringDesc* LOC43; LOC38 = (NIM_BOOL)0; LOC38 = ((*sym0).loc.r == NIM_NIL); if (LOC38) goto LA39; LOC38 = ((*sym0).loc.t == NIM_NIL); LA39: ; if (!LOC38) goto LA40; LOC42 = (NimStringDesc*)0; LOC43 = (NimStringDesc*)0; LOC43 = nimIntToStr((*sym0).Sup.id); LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20); appendString(LOC42, ((NimStringDesc*) &T839829468_285)); appendString(LOC42, (*(*sym0).name).s); appendString(LOC42, ((NimStringDesc*) &T839829468_12)); appendString(LOC42, LOC43); internalerror_198100_155036129((*n0).info, LOC42); } LA40: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 22))&31U)))!=0)) goto LA46; accessthreadlocalvar_534945_839829468(p0, sym0); { NIM_BOOL LOC50; Ropeobj180006* LOC53; LOC50 = (NIM_BOOL)0; LOC50 = emulatedthreadvars_534949_839829468(); if (!LOC50) goto LA51; LOC53 = (Ropeobj180006*)0; LOC53 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r); putintodest_552468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc294812) 0)); } goto LA48; LA51: ; { putlocintodest_541258_839829468(p0, d0, (*sym0).loc); } LA48: ; } goto LA44; LA46: ; { putlocintodest_541258_839829468(p0, d0, (*sym0).loc); } LA44: ; } break; case ((Tsymkind294435) 5): { { NIM_BOOL LOC59; NimStringDesc* LOC63; NimStringDesc* LOC64; LOC59 = (NIM_BOOL)0; LOC59 = ((*sym0).loc.r == NIM_NIL); if (LOC59) goto LA60; LOC59 = ((*sym0).loc.t == NIM_NIL); LA60: ; if (!LOC59) goto LA61; LOC63 = (NimStringDesc*)0; LOC64 = (NimStringDesc*)0; LOC64 = nimIntToStr((*sym0).Sup.id); LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21); appendString(LOC63, ((NimStringDesc*) &T839829468_289)); appendString(LOC63, (*(*sym0).name).s); appendString(LOC63, ((NimStringDesc*) &T839829468_12)); appendString(LOC63, LOC64); internalerror_198100_155036129((*n0).info, LOC63); } LA61: ; putlocintodest_541258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind294435) 3): { { NIM_BOOL LOC68; NimStringDesc* LOC72; NimStringDesc* LOC73; LOC68 = (NIM_BOOL)0; LOC68 = ((*sym0).loc.r == NIM_NIL); if (LOC68) goto LA69; LOC68 = ((*sym0).loc.t == NIM_NIL); LA69: ; if (!LOC68) goto LA70; LOC72 = (NimStringDesc*)0; LOC73 = (NimStringDesc*)0; LOC73 = nimIntToStr((*sym0).Sup.id); LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22); appendString(LOC72, ((NimStringDesc*) &T839829468_290)); appendString(LOC72, (*(*sym0).name).s); appendString(LOC72, ((NimStringDesc*) &T839829468_12)); appendString(LOC72, LOC73); internalerror_198100_155036129((*n0).info, LOC72); } LA70: ; putlocintodest_541258_839829468(p0, d0, (*sym0).loc); } break; default: { NimStringDesc* LOC75; LOC75 = (NimStringDesc*)0; LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI294435))->Sup.len + 22); appendString(LOC75, ((NimStringDesc*) &T839829468_291)); appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI294435))); appendString(LOC75, ((NimStringDesc*) &T839829468_292)); internalerror_198100_155036129((*n0).info, LOC75); } break; } } break; case ((Tnodekind294020) 23): { { NIM_BOOL LOC79; Ropeobj180006* LOC82; LOC79 = (NIM_BOOL)0; LOC79 = isemptytype_299440_850551059((*n0).typ); if (!!(LOC79)) goto LA80; LOC82 = (Ropeobj180006*)0; LOC82 = genliteral_541273_839829468(p0, n0); putintodest_552468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc294812) 0)); } LA80: ; } break; case ((Tnodekind294020) 20) ... ((Tnodekind294020) 22): { Ropeobj180006* LOC84; LOC84 = (Ropeobj180006*)0; LOC84 = genliteral_541273_839829468(p0, n0); putdataintodest_552436_839829468(p0, d0, (*n0).typ, LOC84); } break; case ((Tnodekind294020) 6) ... ((Tnodekind294020) 15): case ((Tnodekind294020) 16) ... ((Tnodekind294020) 19): case ((Tnodekind294020) 5): { Ropeobj180006* LOC86; LOC86 = (Ropeobj180006*)0; LOC86 = genliteral_541273_839829468(p0, n0); putintodest_552468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc294812) 0)); } break; case ((Tnodekind294020) 27): case ((Tnodekind294020) 32): case ((Tnodekind294020) 29): case ((Tnodekind294020) 30): case ((Tnodekind294020) 31): case ((Tnodekind294020) 26): case ((Tnodekind294020) 28): { Tnode294802* op0; genlinedir_534823_839829468(p0, n0); op0 = (*n0).kindU.S6.sons->data[((NI) 0)]; { Tloc294816 a0; if (!(*n0).typ == 0) goto LA90; memset((void*)(&a0), 0, sizeof(a0)); { NIM_BOOL LOC94; LOC94 = (NIM_BOOL)0; LOC94 = ((*op0).kind == ((Tnodekind294020) 3)); if (!(LOC94)) goto LA95; LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic294524) 0))); LA95: ; if (!LOC94) goto LA96; genmagicexpr_559033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic); } goto LA92; LA96: ; { gencall_545632_839829468(p0, n0, (&a0)); } LA92: ; } goto LA88; LA90: ; { { NIM_BOOL LOC102; LOC102 = (NIM_BOOL)0; LOC102 = ((*op0).kind == ((Tnodekind294020) 3)); if (!(LOC102)) goto LA103; LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic294524) 0))); LA103: ; if (!LOC102) goto LA104; genmagicexpr_559033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic); } goto LA100; LA104: ; { gencall_545632_839829468(p0, n0, d0); } LA100: ; } LA88: ; } break; case ((Tnodekind294020) 39): { { NIM_BOOL LOC110; NI LOC112; Ropeobj180006* LOC115; LOC110 = (NIM_BOOL)0; LOC110 = isdeepconstexpr_320566_2616423590(n0); if (!(LOC110)) goto LA111; LOC112 = (NI)0; LOC112 = len_295081_850551059(n0); LOC110 = !((LOC112 == ((NI) 0))); LA111: ; if (!LOC110) goto LA113; LOC115 = (Ropeobj180006*)0; LOC115 = gensetnode_551664_839829468(p0, n0); putintodest_552468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc294812) 0)); } goto LA108; LA113: ; { gensetconstr_559496_839829468(p0, n0, d0); } LA108: ; } break; case ((Tnodekind294020) 41): { { NIM_BOOL LOC120; NI LOC122; LOC120 = (NIM_BOOL)0; LOC120 = isdeepconstexpr_320566_2616423590(n0); if (!(LOC120)) goto LA121; LOC122 = (NI)0; LOC122 = len_295081_850551059(n0); LOC120 = !((LOC122 == ((NI) 0))); LA121: ; if (!LOC120) goto LA123; exprcomplexconst_560684_839829468(p0, n0, d0); } goto LA118; LA123: ; { Ttype294840* LOC126; LOC126 = (Ttype294840*)0; LOC126 = skiptypes_298099_850551059((*n0).typ, IL64(211106242013440)); if (!((*LOC126).kind == ((Ttypekind294244) 24))) goto LA127; genseqconstr_557004_839829468(p0, n0, d0); } goto LA118; LA127: ; { genarrayconstr_560207_839829468(p0, n0, d0); } LA118: ; } break; case ((Tnodekind294020) 37): { { NIM_BOOL LOC133; NI LOC135; LOC133 = (NIM_BOOL)0; LOC133 = isdeepconstexpr_320566_2616423590(n0); if (!(LOC133)) goto LA134; LOC135 = (NI)0; LOC135 = len_295081_850551059(n0); LOC133 = !((LOC135 == ((NI) 0))); LA134: ; if (!LOC133) goto LA136; exprcomplexconst_560684_839829468(p0, n0, d0); } goto LA131; LA136: ; { gentupleconstr_559618_839829468(p0, n0, d0); } LA131: ; } break; case ((Tnodekind294020) 38): { genobjconstr_556903_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 61): { gencast_558537_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 58): case ((Tnodekind294020) 59): case ((Tnodekind294020) 60): { genconv_558632_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 64): case ((Tnodekind294020) 63): { genaddr_555051_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 42): { genbracketexpr_556277_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 47): case ((Tnodekind294020) 65): { genderef_545921_839829468(p0, n0, d0, NIM_FALSE); } break; case ((Tnodekind294020) 45): { genrecordfield_555448_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 46): { gencheckedrecordfield_556046_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 127): case ((Tnodekind294020) 112): { genblock_548083_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 126): { genstmtlistexpr_560402_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 115): { { NI i_561023_839829468; NI HEX3Atmp_561276_839829468; NI LOC151; NI res_561279_839829468; i_561023_839829468 = (NI)0; HEX3Atmp_561276_839829468 = (NI)0; LOC151 = (NI)0; LOC151 = sonslen_297351_850551059(n0); HEX3Atmp_561276_839829468 = (NI)(LOC151 - ((NI) 1)); res_561279_839829468 = ((NI) 0); { while (1) { if (!(res_561279_839829468 <= HEX3Atmp_561276_839829468)) goto LA153; i_561023_839829468 = res_561279_839829468; genstmts_541244_839829468(p0, (*n0).kindU.S6.sons->data[i_561023_839829468]); res_561279_839829468 += ((NI) 1); } LA153: ; } } } break; case ((Tnodekind294020) 48): case ((Tnodekind294020) 92): { genif_546982_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 93): { expr_541248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0); } break; case ((Tnodekind294020) 66): { downconv_560581_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 67): { upconv_560431_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 68): { genrangechck_558590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563)); } break; case ((Tnodekind294020) 69): { genrangechck_558590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564)); } break; case ((Tnodekind294020) 70): { genrangechck_558590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565)); } break; case ((Tnodekind294020) 71): { convstrtocstr_558642_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 72): { convcstrtostr_558654_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 51): case ((Tnodekind294020) 52): { Tsym294834* sym0; sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; genproc_534951_839829468((*p0).module, sym0); { NIM_BOOL LOC166; NimStringDesc* LOC170; LOC166 = (NIM_BOOL)0; LOC166 = ((*sym0).loc.r == NIM_NIL); if (LOC166) goto LA167; LOC166 = ((*sym0).loc.t == NIM_NIL); LA167: ; if (!LOC166) goto LA168; LOC170 = (NimStringDesc*)0; LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20); appendString(LOC170, ((NimStringDesc*) &T839829468_271)); appendString(LOC170, (*(*sym0).name).s); internalerror_198100_155036129((*n0).info, LOC170); } LA168: ; putlocintodest_541258_839829468(p0, d0, (*sym0).loc); } break; case ((Tnodekind294020) 155): { genclosure_559836_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 1): { } break; case ((Tnodekind294020) 96): { genwhilestmt_547984_839829468(p0, n0); } break; case ((Tnodekind294020) 99): case ((Tnodekind294020) 100): { genvarstmt_546854_839829468(p0, n0); } break; case ((Tnodekind294020) 101): { genconststmt_546909_839829468(p0, n0); } break; case ((Tnodekind294020) 94): { internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594)); } break; case ((Tnodekind294020) 97): { gencase_549826_839829468(p0, n0, d0); } break; case ((Tnodekind294020) 109): { genreturnstmt_547617_839829468(p0, n0); } break; case ((Tnodekind294020) 110): { genbreakstmt_548444_839829468(p0, n0); } break; case ((Tnodekind294020) 73): { { if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag294427) 14))&15U)))!=0))) goto LA183; genasgn_551239_839829468(p0, n0, NIM_FALSE); } LA183: ; } break; case ((Tnodekind294020) 74): { { if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag294427) 14))&15U)))!=0))) goto LA188; genasgn_551239_839829468(p0, n0, !(((*p0).prc == NIM_NIL))); } LA188: ; } break; case ((Tnodekind294020) 114): { { Tloc294816 a0; if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA193; genlinedir_534823_839829468(p0, n0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA193: ; } break; case ((Tnodekind294020) 89): { genasmstmt_550659_839829468(p0, n0); } break; case ((Tnodekind294020) 106): { { NIM_BOOL LOC199; NIM_BOOL LOC200; LOC199 = (NIM_BOOL)0; LOC200 = (NIM_BOOL)0; LOC200 = (gcmd_171132_2607990831 == ((Tcommands171076) 2)); if (LOC200) goto LA201; LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA201: ; LOC199 = LOC200; if (!(LOC199)) goto LA202; LOC199 = !(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 31))&63U)))!=0)); LA202: ; if (!LOC199) goto LA203; gentrycpp_549865_839829468(p0, n0, d0); } goto LA197; LA203: ; { gentry_550114_839829468(p0, n0, d0); } LA197: ; } break; case ((Tnodekind294020) 108): { genraisestmt_548828_839829468(p0, n0); } break; case ((Tnodekind294020) 98): { gentypesection_540184_839829468((*p0).module, n0); } break; case ((Tnodekind294020) 125): case ((Tnodekind294020) 84): case ((Tnodekind294020) 121): case ((Tnodekind294020) 116): case ((Tnodekind294020) 117): case ((Tnodekind294020) 118): case ((Tnodekind294020) 119): case ((Tnodekind294020) 120): case ((Tnodekind294020) 83): case ((Tnodekind294020) 82): { } break; case ((Tnodekind294020) 90): { genpragma_551039_839829468(p0, n0); } break; case ((Tnodekind294020) 91): { Tnode294802* LOC211; LOC211 = (Tnode294802*)0; LOC211 = lastson_297364_850551059(n0); expr_541248_839829468(p0, LOC211, d0); } break; case ((Tnodekind294020) 79): case ((Tnodekind294020) 80): case ((Tnodekind294020) 81): { { Tsym294834* prc0; if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 1))) goto LA215; prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NIM_BOOL LOC219; Tsym294834* LOC220; LOC219 = (NIM_BOOL)0; LOC220 = (Tsym294834*)0; LOC220 = skipgenericowner_299279_850551059(prc0); LOC219 = ((*LOC220).kind == ((Tsymkind294435) 6)); if (!(LOC219)) goto LA221; LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 23))&31U)))!=0)); LA221: ; if (!LOC219) goto LA222; { NIM_BOOL LOC226; NIM_BOOL LOC227; NIM_BOOL LOC228; NIM_BOOL LOC229; Tsym294834* LOC231; NIM_BOOL LOC234; LOC226 = (NIM_BOOL)0; LOC227 = (NIM_BOOL)0; LOC228 = (NIM_BOOL)0; LOC229 = (NIM_BOOL)0; LOC229 = !(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 2))&63U)))!=0)); if (!(LOC229)) goto LA230; LOC231 = (Tsym294834*)0; LOC231 = getmodule_301123_2984716966(prc0); LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag294184) 25))&31U)))!=0)); LA230: ; LOC228 = LOC229; if (LOC228) goto LA232; LOC228 = ((65600 & (*prc0).flags) == 64); LA232: ; LOC227 = LOC228; if (LOC227) goto LA233; LOC234 = (NIM_BOOL)0; LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 6))&31U)))!=0); if (!(LOC234)) goto LA235; LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 5))&15U)))!=0); LA235: ; LOC227 = LOC234; LA233: ; LOC226 = LOC227; if (LOC226) goto LA236; LOC226 = ((*prc0).kind == ((Tsymkind294435) 13)); LA236: ; if (!LOC226) goto LA237; { NIM_BOOL LOC241; Tnode294802* LOC242; LOC241 = (NIM_BOOL)0; LOC242 = (Tnode294802*)0; LOC242 = getbody_337227_1724185294(prc0); LOC241 = !(((*LOC242).kind == ((Tnodekind294020) 1))); if (LOC241) goto LA243; LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0); LA243: ; if (!LOC241) goto LA244; genproc_534951_839829468((*p0).module, prc0); } LA244: ; } LA237: ; } LA222: ; } LA215: ; } break; case ((Tnodekind294020) 95): { genparforstmt_548208_839829468(p0, n0); } break; case ((Tnodekind294020) 157): { genstate_546117_839829468(p0, n0); } break; case ((Tnodekind294020) 156): { gengotostate_546144_839829468(p0, n0); } break; case ((Tnodekind294020) 158): { genbreakstate_546229_839829468(p0, n0); } break; default: { NimStringDesc* LOC251; LOC251 = (NimStringDesc*)0; LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI294020))->Sup.len + 25); appendString(LOC251, ((NimStringDesc*) &T839829468_291)); appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI294020))); appendString(LOC251, ((NimStringDesc*) &T839829468_657)); internalerror_198100_155036129((*n0).info, LOC251); } break; } }
N_NIMCALL(void, genstmts_541244_839829468)(Tcproc531021* p0, Tnode294802* t0) { Tloc294816 a0; memset((void*)(&a0), 0, sizeof(a0)); expr_541248_839829468(p0, t0, (&a0)); { NimStringDesc* LOC5; if (!!(((7 &(1U<<((NU)(a0.k)&15U)))!=0))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_198185_1689653243(T839829468_658); internalerror_198113_155036129(LOC5); } LA3: ; } N_NIMCALL(Tnode294802*, myprocess_565402_839829468)(Tpasscontext343002* b0, Tnode294802* n0) { Tnode294802* result0; Tcgen531027* m0; { result0 = (Tnode294802*)0; result0 = n0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (b0 == NIM_NIL); if (LOC3) goto LA4; LOC3 = skipcodegen_343085_2355241294(n0); LA4: ; if (!LOC3) goto LA5; goto BeforeRet; } LA5: ; m0 = ((Tcgen531027*) (b0)); (*(*m0).initproc).options = initprocoptions_564635_839829468(m0); genstmts_541244_839829468((*m0).initproc, n0); }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj180006*, getsomeinitname_563904_839829468)(Tsym294834* m0, NimStringDesc* suffix0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { NimStringDesc* LOC5; if (!((12288 & (*m0).flags) == 0)) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = mangle_530847_2036603609((*(*(*m0).owner).name).s); result0 = rope_180277_2381377266(LOC5); add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_12)); } LA3: ; add_180487_2381377266(&result0, (*(*m0).name).s); add_180487_2381377266(&result0, suffix0); return result0; } N_NIMCALL(Ropeobj180006*, getinitname_564235_839829468)(Tsym294834* m0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = getsomeinitname_563904_839829468(m0, ((NimStringDesc*) &T839829468_659)); return result0; } N_NIMCALL(Ropeobj180006*, getdatinitname_564239_839829468)(Tsym294834* m0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = getsomeinitname_563904_839829468(m0, ((NimStringDesc*) &T839829468_660)); return result0; } N_NIMCALL(void, registermoduletomain_564243_839829468)(Tsym294834* m0) { Ropeobj180006* init0; Ropeobj180006* 
datinit0; TY180507 LOC1; TY180507 LOC2; init0 = getinitname_564235_839829468(m0); datinit0 = getdatinitname_564239_839829468(m0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = init0; addf_181205_2381377266(&mainmodprocs_531148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = datinit0; addf_181205_2381377266(&mainmodprocs_531148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1); { TY180507 LOC7; Ropeobj180006* initcall0; TY180507 LOC8; if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag294184) 13))&31U)))!=0))) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = datinit0; addf_181205_2381377266(&maindatinit_531151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = init0; initcall0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1); { if (!(((*m0).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)) goto LA11; add_180482_2381377266(&mainmodinit_531149_3723162438, initcall0); } goto LA9; LA11: ; { add_180482_2381377266(&othermodsinit_531150_3723162438, initcall0); } LA9: ; } LA5: ; } N_NIMCALL(Ropeobj180006*, genfilenames_563688_839829468)(Tcgen531027* m0) { Ropeobj180006* result0; Ropeobj180006* LOC1; result0 = (Ropeobj180006*)0; LOC1 = (Ropeobj180006*)0; LOC1 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_673)); result0 = NIM_NIL; { NI i_563717_839829468; NI HEX3Atmp_563722_839829468; NI res_563725_839829468; i_563717_839829468 = (NI)0; HEX3Atmp_563722_839829468 = (NI)0; HEX3Atmp_563722_839829468 = ((fileinfos_193629_155036129 ? 
fileinfos_193629_155036129->Sup.len : 0) - 1); res_563725_839829468 = ((NI) 0); { while (1) { TY180507 LOC5; if (!(res_563725_839829468 <= HEX3Atmp_563722_839829468)) goto LA4; i_563717_839829468 = res_563725_839829468; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = makecstring_193638_155036129(fileinfos_193629_155036129->data[i_563717_839829468].projpath); addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), LOC5, 1); res_563725_839829468 += ((NI) 1); } LA4: ; } } return result0; } N_NIMCALL(void, genmainproc_563729_839829468)(Tcgen531027* m0) { NimStringDesc* nimmain0; NimStringDesc* othermain0; Ropeobj180006* initstackbottomcall0; TY538475 LOC38; TY537238 LOC47; nimmain0 = (NimStringDesc*)0; othermain0 = (NimStringDesc*)0; { NIM_BOOL LOC3; NIM_BOOL LOC12; LOC3 = (NIM_BOOL)0; LOC3 = (targetos_178629_4151366050 == ((Tsystemos178004) 2)); if (!(LOC3)) goto LA4; LOC3 = !(((gglobaloptions_171130_2607990831 & 1280) == 0)); LA4: ; if (!LOC3) goto LA5; { if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 10))&63U)))!=0)) goto LA9; nimmain0 = copyString(((NimStringDesc*) &T839829468_663)); othermain0 = copyString(((NimStringDesc*) &T839829468_664)); } goto LA7; LA9: ; { nimmain0 = copyString(((NimStringDesc*) &T839829468_665)); othermain0 = copyString(((NimStringDesc*) &T839829468_666)); } LA7: ; LOC12 = (NIM_BOOL)0; LOC12 = includestr_148249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667)); } goto LA1; LA5: ; { if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 8))&63U)))!=0)) goto LA14; nimmain0 = copyString(((NimStringDesc*) &T839829468_665)); othermain0 = copyString(((NimStringDesc*) &T839829468_668)); } goto LA1; LA14: ; { if (!(targetos_178629_4151366050 == ((Tsystemos178004) 24))) goto LA17; nimmain0 = copyString(((NimStringDesc*) &T839829468_669)); othermain0 = copyString(((NimStringDesc*) &T839829468_670)); } goto LA1; LA17: ; { nimmain0 = copyString(((NimStringDesc*) 
&T839829468_669)); othermain0 = copyString(((NimStringDesc*) &T839829468_671)); } LA1: ; { Ropeobj180006* LOC24; if (!!((gbreakpoints_550861_839829468 == NIM_NIL))) goto LA22; LOC24 = (Ropeobj180006*)0; LOC24 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_672)); } LA22: ; { Ropeobj180006* LOC29; if (!((goptions_171128_2607990831 &(1U<<((NU)(((Toption171009) 17))&31U)))!=0)) goto LA27; LOC29 = (Ropeobj180006*)0; LOC29 = genfilenames_563688_839829468(m0); add_180482_2381377266(&gbreakpoints_550861_839829468, LOC29); } LA27: ; { NIM_BOOL LOC32; LOC32 = (NIM_BOOL)0; LOC32 = (targetos_178629_4151366050 == ((Tsystemos178004) 24)); if (LOC32) goto LA33; LOC32 = (gselectedgc_171133_2607990831 == ((Tgcmode171080) 0)); LA33: ; if (!LOC32) goto LA34; initstackbottomcall0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_490)); } goto LA30; LA34: ; { TY535289 LOC37; memset((void*)LOC37, 0, sizeof(LOC37)); initstackbottomcall0 = ropecg_534407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0); } LA30: ; (*m0).labels += ((NI) 1); memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = maindatinit_531151_3723162438; LOC38[1] = gbreakpoints_550861_839829468; LOC38[2] = othermodsinit_531150_3723162438; { NIM_BOOL LOC41; TY535289 LOC45; LOC41 = (NIM_BOOL)0; LOC41 = emulatedthreadvars_534949_839829468(); if (!(LOC41)) goto LA42; LOC41 = !((targetos_178629_4151366050 == ((Tsystemos178004) 24))); LA42: ; if (!LOC41) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC38[3] = ropecg_534407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0); } goto LA39; LA43: ; { LOC38[3] = rope_180277_2381377266(((NimStringDesc*) &T839829468_490)); } LA39: ; LOC38[4] = initstackbottomcall0; appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5); memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = mainmodinit_531149_3723162438; LOC47[1] = initstackbottomcall0; LOC47[2] = rope_180401_2381377266(((NI64) 
((*m0).labels))); appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 10))- 0], nimmain0, LOC47, 3); { TY535289 LOC52; if (!!(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 20))&63U)))!=0))) goto LA50; memset((void*)LOC52, 0, sizeof(LOC52)); appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 10))- 0], othermain0, LOC52, 0); } LA50: ; } N_NIMCALL(Tnode294802*, myclose_565830_839829468)(Tpasscontext343002* b0, Tnode294802* n0) { Tnode294802* result0; Tcgen531027* m0; { result0 = (Tnode294802*)0; result0 = n0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (b0 == NIM_NIL); if (LOC3) goto LA4; LOC3 = skipcodegen_343085_2355241294(n0); LA4: ; if (!LOC3) goto LA5; goto BeforeRet; } LA5: ; m0 = ((Tcgen531027*) (b0)); { if (!!((n0 == NIM_NIL))) goto LA9; (*(*m0).initproc).options = initprocoptions_564635_839829468(m0); genstmts_541244_839829468((*m0).initproc, n0); } LA9: ; registermoduletomain_564243_839829468((*m0).module); { Tnode294802* disp0; if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)) goto LA13; (*m0).flags |= ((NU8)1)<<((((Codegenflag531025) 5))%(sizeof(NU8)*8)); disp0 = generatemethoddispatchers_434151_3853300031(); { NI i_565891_839829468; NI HEX3Atmp_565895_839829468; NI LOC16; NI res_565898_839829468; i_565891_839829468 = (NI)0; HEX3Atmp_565895_839829468 = (NI)0; LOC16 = (NI)0; LOC16 = sonslen_297351_850551059(disp0); HEX3Atmp_565895_839829468 = (NI)(LOC16 - ((NI) 1)); res_565898_839829468 = ((NI) 0); { while (1) { if (!(res_565898_839829468 <= HEX3Atmp_565895_839829468)) goto LA18; i_565891_839829468 = res_565898_839829468; genprocaux_562284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_565891_839829468]).kindU.S4.sym); res_565898_839829468 += ((NI) 1); } LA18: ; } } genmainproc_563729_839829468(m0); } LA13: ; }BeforeRet: ; return result0; } N_NIMCALL(void, finishmodule_565420_839829468)(Tcgen531027* m0) { NI i0; i0 = ((NI) 0); { while (1) { Tsym294834* prc0; if (!(i0 <= 
((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1))) goto LA2; prc0 = (*m0).forwardedprocs->data[i0]; { NimStringDesc* LOC7; if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 4))&31U)))!=0)) goto LA5; LOC7 = (NimStringDesc*)0; LOC7 = rawNewString((*(*prc0).name).s->Sup.len + 17); appendString(LOC7, ((NimStringDesc*) &T839829468_678)); appendString(LOC7, (*(*prc0).name).s); internalerror_198100_155036129((*prc0).info, LOC7); } LA5: ; genprocnoforward_562906_839829468(m0, prc0); i0 += ((NI) 1); } LA2: ; } gforwardedprocscounter_531171_3723162438 -= i0; (*m0).forwardedprocs = (Tsymseq294804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym294834*), ((NI) 0)); } N_NIMCALL(void, geninitcode_564286_839829468)(Tcgen531027* m0) { Ropeobj180006* initname0; Ropeobj180006* prc0; TY180507 LOC1; Ropeobj180006* LOC12; Ropeobj180006* LOC13; Ropeobj180006** LOC14; Ropeobj180006** LOC15; Ropeobj180006** LOC16; Ropeobj180006* LOC17; Ropeobj180006* LOC33; Ropeobj180006** LOC34; Ropeobj180006** LOC35; Ropeobj180006** LOC36; Ropeobj180006* LOC37; Ropeobj180006* LOC38; Ropeobj180006** LOC39; Ropeobj180006** LOC40; Ropeobj180006** LOC41; Ropeobj180006* LOC42; Ropeobj180006* LOC50; TY535289 LOC51; TY180507 LOC52; TY535289 LOC58; initname0 = getinitname_564235_839829468((*m0).module); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = initname0; prc0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1); { TY534811 LOC6; if (!(((NI) 0) < (*m0).typenodes)) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = (*m0).typenodesname; LOC6[1] = rope_180401_2381377266(((NI64) ((*m0).typenodes))); appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2); } LA4: ; { TY534811 LOC11; if (!(((NI) 0) < (*m0).nimtypes)) goto LA9; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = (*m0).nimtypesname; LOC11[1] = rope_180401_2381377266(((NI64) ((*m0).nimtypes))); appcg_534632_839829468(m0, 
&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2); } LA9: ; LOC12 = (Ropeobj180006*)0; LOC12 = initgcframe_540435_839829468((*m0).initproc); add_180482_2381377266(&prc0, LOC12); LOC13 = (Ropeobj180006*)0; LOC13 = gensectionstart_532081_2760143328(((Tcprocsection531011) 0)); add_180482_2381377266(&prc0, LOC13); LOC14 = (Ropeobj180006**)0; LOC14 = s_531179_3723162438((*m0).preinitproc, ((Tcprocsection531011) 0)); add_180482_2381377266(&prc0, (*LOC14)); LOC15 = (Ropeobj180006**)0; LOC15 = s_531179_3723162438((*m0).initproc, ((Tcprocsection531011) 0)); add_180482_2381377266(&prc0, (*LOC15)); LOC16 = (Ropeobj180006**)0; LOC16 = s_531179_3723162438((*m0).postinitproc, ((Tcprocsection531011) 0)); add_180482_2381377266(&prc0, (*LOC16)); LOC17 = (Ropeobj180006*)0; LOC17 = gensectionend_532116_2760143328(((Tcprocsection531011) 0)); add_180482_2381377266(&prc0, LOC17); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0); if (!(LOC20)) goto LA21; LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag531025) 2))&7U)))!=0)); LA21: ; if (!LOC20) goto LA22; (*m0).flags |= ((NU8)1)<<((((Codegenflag531025) 2))%(sizeof(NU8)*8)); { Ropeobj180006* procname0; Ropeobj180006* LOC28; Ropeobj180006* LOC29; if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag531025) 0))&7U)))!=0))) goto LA26; procname0 = makecstring_193638_155036129((*(*(*m0).module).name).s); LOC28 = (Ropeobj180006*)0; LOC28 = quotedfilename_198818_155036129((*(*m0).module).info); LOC29 = (Ropeobj180006*)0; LOC29 = initframe_562140_839829468((*m0).initproc, procname0, LOC28); add_180482_2381377266(&prc0, LOC29); } goto LA24; LA26: ; { TY535289 LOC31; Ropeobj180006* LOC32; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj180006*)0; LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0); add_180482_2381377266(&prc0, LOC32); } LA24: ; } LA22: ; LOC33 = (Ropeobj180006*)0; LOC33 = 
gensectionstart_532081_2760143328(((Tcprocsection531011) 1)); add_180482_2381377266(&prc0, LOC33); LOC34 = (Ropeobj180006**)0; LOC34 = s_531179_3723162438((*m0).preinitproc, ((Tcprocsection531011) 1)); add_180482_2381377266(&prc0, (*LOC34)); LOC35 = (Ropeobj180006**)0; LOC35 = s_531179_3723162438((*m0).initproc, ((Tcprocsection531011) 1)); add_180482_2381377266(&prc0, (*LOC35)); LOC36 = (Ropeobj180006**)0; LOC36 = s_531179_3723162438((*m0).postinitproc, ((Tcprocsection531011) 1)); add_180482_2381377266(&prc0, (*LOC36)); LOC37 = (Ropeobj180006*)0; LOC37 = gensectionend_532116_2760143328(((Tcprocsection531011) 1)); add_180482_2381377266(&prc0, LOC37); LOC38 = (Ropeobj180006*)0; LOC38 = gensectionstart_532081_2760143328(((Tcprocsection531011) 2)); add_180482_2381377266(&prc0, LOC38); LOC39 = (Ropeobj180006**)0; LOC39 = s_531179_3723162438((*m0).preinitproc, ((Tcprocsection531011) 2)); add_180482_2381377266(&prc0, (*LOC39)); LOC40 = (Ropeobj180006**)0; LOC40 = s_531179_3723162438((*m0).initproc, ((Tcprocsection531011) 2)); add_180482_2381377266(&prc0, (*LOC40)); LOC41 = (Ropeobj180006**)0; LOC41 = s_531179_3723162438((*m0).postinitproc, ((Tcprocsection531011) 2)); add_180482_2381377266(&prc0, (*LOC41)); LOC42 = (Ropeobj180006*)0; LOC42 = gensectionend_532116_2760143328(((Tcprocsection531011) 2)); add_180482_2381377266(&prc0, LOC42); { NIM_BOOL LOC45; Ropeobj180006* LOC49; LOC45 = (NIM_BOOL)0; LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0); if (!(LOC45)) goto LA46; LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag531025) 0))&7U)))!=0)); LA46: ; if (!LOC45) goto LA47; LOC49 = (Ropeobj180006*)0; LOC49 = deinitframe_562150_839829468((*m0).initproc); add_180482_2381377266(&prc0, LOC49); } LA47: ; LOC50 = (Ropeobj180006*)0; LOC50 = deinitgcframe_540441_839829468((*m0).initproc); add_180482_2381377266(&prc0, LOC50); memset((void*)LOC51, 0, sizeof(LOC51)); addf_181205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0); 
memset((void*)LOC52, 0, sizeof(LOC52)); LOC52[0] = getdatinitname_564239_839829468((*m0).module); addf_181205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1); { Tcfilesection531005 i_564401_839829468; NI res_564482_839829468; i_564401_839829468 = (Tcfilesection531005)0; res_564482_839829468 = ((NI) 12); { while (1) { Ropeobj180006* LOC56; Ropeobj180006* LOC57; if (!(res_564482_839829468 <= ((NI) 16))) goto LA55; i_564401_839829468 = ((Tcfilesection531005) (res_564482_839829468)); LOC56 = (Ropeobj180006*)0; LOC56 = gensectionstart_532015_2760143328(i_564401_839829468); add_180482_2381377266(&prc0, LOC56); add_180482_2381377266(&prc0, (*m0).s[(i_564401_839829468)- 0]); LOC57 = (Ropeobj180006*)0; LOC57 = gensectionend_532050_2760143328(i_564401_839829468); add_180482_2381377266(&prc0, LOC57); res_564482_839829468 += ((NI) 1); } LA55: ; } } memset((void*)LOC58, 0, sizeof(LOC58)); addf_181205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 11))- 0], prc0); { NIM_CHAR i_564442_839829468; Ropeobj180006* el_564443_839829468; TY531136 HEX3Atmp_564487_839829468; NIM_CHAR i_564490_839829468; i_564442_839829468 = (NIM_CHAR)0; el_564443_839829468 = (Ropeobj180006*)0; memset((void*)HEX3Atmp_564487_839829468, 0, sizeof(HEX3Atmp_564487_839829468)); memcpy((void*)HEX3Atmp_564487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_564487_839829468)); i_564490_839829468 = 48; { if (!((NU8)(((NIM_CHAR) (((NU8)(i_564490_839829468))))) <= (NU8)(57))) goto LA62; { while (1) { i_564442_839829468 = i_564490_839829468; el_564443_839829468 = HEX3Atmp_564487_839829468[(((NU8)(i_564490_839829468)))- 48]; { Ropeobj180006* ex0; TY534811 LOC70; if (!!((el_564443_839829468 == NIM_NIL))) goto LA68; memset((void*)LOC70, 0, sizeof(LOC70)); LOC70[0] = rope_180401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_564442_839829468)))) - ((NI) 48))))); LOC70[1] = el_564443_839829468; ex0 = 
HEX25_180905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 11))- 0], ex0); } LA68: ; { if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_564490_839829468))))))) goto LA73; goto LA64; } LA73: ; i_564490_839829468 += ((NI) 1); } } LA64: ; } LA62: ; } } N_NIMCALL(void, finishtypedescriptions_537842_839829468)(Tcgen531027* m0) { NI i0; i0 = ((NI) 0); { while (1) { Ropeobj180006* LOC3; if (!(i0 < ((*m0).typestack ? (*m0).typestack->Sup.len : 0))) goto LA2; LOC3 = (Ropeobj180006*)0; LOC3 = gettypedesc_537671_839829468(m0, (*m0).typestack->data[i0]); i0 += ((NI) 1); } LA2: ; } } N_NIMCALL(Ropeobj180006*, getcopyright_563665_839829468)(NimStringDesc* cfile0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; { TY180507 LOC5; if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 4))&63U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_180277_2381377266(((NimStringDesc*) &T839829468_686)); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_685), LOC5, 1); } goto LA1; LA3: ; { TY538475 LOC7; NimStringDesc* LOC8; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rope_180277_2381377266(((NimStringDesc*) &T839829468_686)); LOC7[1] = rope_180277_2381377266(Os_178068_4151366050[(targetos_178629_4151366050)- 1].Field0); LOC7[2] = rope_180277_2381377266(Cpu_178496_4151366050[(targetcpu_178627_4151366050)- 1].Field0); LOC7[3] = rope_180277_2381377266(Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field0); LOC8 = (NimStringDesc*)0; LOC8 = getcompilecfilecmd_276284_2528170400(cfile0, NIM_FALSE); LOC7[4] = rope_180277_2381377266(LOC8); result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_687), LOC7, 5); } LA1: ; return result0; } static N_INLINE(void, addinttypes_563659_839829468)(Ropeobj180006** result0) { NimStringDesc* LOC1; TY180507 LOC2; LOC1 = (NimStringDesc*)0; LOC1 = rawNewString(tnl_178644_4151366050->Sup.len + 22); appendString(LOC1, 
((NimStringDesc*) &T839829468_688)); appendString(LOC1, tnl_178644_4151366050); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rope_180401_2381377266(((NI64) (Cpu_178496_4151366050[(targetcpu_178627_4151366050)- 1].Field1))); addf_181205_2381377266(result0, LOC1, LOC2, 1); } N_NIMCALL(Ropeobj180006*, getfileheader_563683_839829468)(NimStringDesc* cfile0) { Ropeobj180006* result0; result0 = (Ropeobj180006*)0; result0 = getcopyright_563665_839829468(cfile0); addinttypes_563659_839829468(&result0); return result0; } N_NIMCALL(void, generatethreadlocalstorage_540717_839829468)(Tcgen531027* m0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; TY180507 LOC13; LOC3 = (NIM_BOOL)0; LOC3 = !((nimtv_540656_839829468 == NIM_NIL)); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag531025) 1))&7U)))!=0); if (LOC5) goto LA6; LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; { Ttype294840* t_540761_839829468; NI i_540768_839829468; NI L_540770_839829468; t_540761_839829468 = (Ttype294840*)0; i_540768_839829468 = ((NI) 0); L_540770_839829468 = (nimtvdeps_540674_839829468 ? 
nimtvdeps_540674_839829468->Sup.len : 0); { while (1) { Ropeobj180006* LOC12; if (!(i_540768_839829468 < L_540770_839829468)) goto LA11; t_540761_839829468 = nimtvdeps_540674_839829468->data[i_540768_839829468]; LOC12 = (Ropeobj180006*)0; LOC12 = gettypedesc_537671_839829468(m0, t_540761_839829468); i_540768_839829468 += ((NI) 1); } LA11: ; } } memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = nimtv_540656_839829468; addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1); } LA7: ; } N_NIMCALL(void, generateheaders_562104_839829468)(Tcgen531027* m0) { NimStringDesc* LOC1; Tstrentry148009* it0; LOC1 = (NimStringDesc*)0; LOC1 = rawNewString(tnl_178644_4151366050->Sup.len + tnl_178644_4151366050->Sup.len + 20); appendString(LOC1, tnl_178644_4151366050); appendString(LOC1, ((NimStringDesc*) &T839829468_690)); appendString(LOC1, tnl_178644_4151366050); add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 1))- 0], LOC1); it0 = ((Tstrentry148009*) ((*m0).headerfiles.head)); { while (1) { if (!!((it0 == NIM_NIL))) goto LA3; { NimStringDesc* LOC8; NimStringDesc* LOC9; Ropeobj180006* LOC10; if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6; LOC8 = (NimStringDesc*)0; LOC9 = (NimStringDesc*)0; LOC9 = nsuReplaceChar((*it0).data, 96, 34); LOC8 = rawNewString(LOC9->Sup.len + tnl_178644_4151366050->Sup.len + 0); appendString(LOC8, LOC9); appendString(LOC8, tnl_178644_4151366050); LOC10 = (Ropeobj180006*)0; LOC10 = rope_180277_2381377266(LOC8); add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 1))- 0], LOC10); } goto LA4; LA6: ; { TY180507 LOC14; if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_180277_2381377266((*it0).data); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1); } goto LA4; LA12: ; { TY180507 
LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rope_180277_2381377266((*it0).data); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1); } LA4: ; it0 = ((Tstrentry148009*) ((*it0).Sup.next)); } LA3: ; } } N_NIMCALL(Ropeobj180006*, genmodule_564491_839829468)(Tcgen531027* m0, NimStringDesc* cfile0) { Ropeobj180006* result0; Ropeobj180006* LOC1; result0 = (Ropeobj180006*)0; result0 = getfileheader_563683_839829468(cfile0); LOC1 = (Ropeobj180006*)0; LOC1 = genmergeinfo_532203_2760143328(m0); add_180482_2381377266(&result0, LOC1); generatethreadlocalstorage_540717_839829468(m0); generateheaders_562104_839829468(m0); { Tcfilesection531005 i_564614_839829468; NI res_564622_839829468; i_564614_839829468 = (Tcfilesection531005)0; res_564622_839829468 = ((NI) 1); { while (1) { Ropeobj180006* LOC5; Ropeobj180006* LOC6; if (!(res_564622_839829468 <= ((NI) 10))) goto LA4; i_564614_839829468 = ((Tcfilesection531005) (res_564622_839829468)); LOC5 = (Ropeobj180006*)0; LOC5 = gensectionstart_532015_2760143328(i_564614_839829468); add_180482_2381377266(&result0, LOC5); add_180482_2381377266(&result0, (*m0).s[(i_564614_839829468)- 0]); LOC6 = (Ropeobj180006*)0; LOC6 = gensectionend_532050_2760143328(i_564614_839829468); add_180482_2381377266(&result0, LOC6); res_564622_839829468 += ((NI) 1); } LA4: ; } } add_180482_2381377266(&result0, (*m0).s[(((Tcfilesection531005) 11))- 0]); return result0; } N_NIMCALL(void, updatecachedmodule_565813_839829468)(Tcgen531027* m0) { NimStringDesc* cfile0; NimStringDesc* cfilenoext0; cfile0 = getcfile_565204_839829468(m0); cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490)); { NIM_BOOL LOC3; Ropeobj180006* code0; LOC3 = (NIM_BOOL)0; LOC3 = mergerequired_532832_2760143328(m0); if (!(LOC3)) goto LA4; LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)); LA4: ; if (!LOC3) goto LA5; mergefiles_533241_2760143328(cfile0, m0); 
geninitcode_564286_839829468(m0); finishtypedescriptions_537842_839829468(m0); code0 = genmodule_564491_839829468(m0, cfile0); writerope_180836_2381377266(code0, cfile0, NIM_FALSE); addfiletocompile_275863_2528170400(cfile0); } LA5: ; addfiletolink_275872_2528170400(cfilenoext0); } N_NIMCALL(void, generatethreadvarssize_540771_839829468)(Tcgen531027* m0) { { NimStringDesc* externc0; TY180507 LOC12; if (!!((nimtv_540656_839829468 == NIM_NIL))) goto LA3; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = !((gcmd_171132_2607990831 == ((Tcommands171076) 2))); if (!(LOC7)) goto LA8; LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; externc0 = copyString(((NimStringDesc*) &T839829468_693)); } goto LA5; LA9: ; { externc0 = copyString(((NimStringDesc*) &T839829468_490)); } LA5: ; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_180277_2381377266(externc0); addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1); } LA3: ; } N_NIMCALL(NIM_BOOL, shouldrecompile_565621_839829468)(Ropeobj180006* code0, NimStringDesc* cfile0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; result0 = NIM_TRUE; { NimStringDesc* objfile0; if (!!(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 1))&63U)))!=0))) goto LA3; objfile0 = toobjfile_275859_2528170400(cfile0); { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = writeropeifnotequal_181511_2381377266(code0, cfile0); if (!LOC7) goto LA8; goto BeforeRet; } LA8: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = nosexistsFile(objfile0); if (!(LOC12)) goto LA13; LOC12 = nosfileNewer(objfile0, cfile0); LA13: ; if (!LOC12) goto LA14; result0 = NIM_FALSE; } LA14: ; } goto LA1; LA3: ; { writerope_180836_2381377266(code0, cfile0, NIM_FALSE); } LA1: ; }BeforeRet: ; return result0; } N_NIMCALL(void, writemodule_565637_839829468)(Tcgen531027* m0, NIM_BOOL pending0) { NimStringDesc* cfile0; NimStringDesc* cfilenoext0; cfile0 = 
getcfile_565204_839829468(m0); cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490)); { NIM_BOOL LOC3; Ropeobj180006* code0; LOC3 = (NIM_BOOL)0; LOC3 = !((*m0).Sup.fromcache); if (LOC3) goto LA4; LOC3 = ((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 1))&63U)))!=0); LA4: ; if (!LOC3) goto LA5; geninitcode_564286_839829468(m0); finishtypedescriptions_537842_839829468(m0); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)) goto LA9; add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 7))- 0], mainmodprocs_531148_3723162438); generatethreadvarssize_540771_839829468(m0); } LA9: ; code0 = genmodule_564491_839829468(m0, cfile0); { NIM_BOOL LOC13; LOC13 = (NIM_BOOL)0; LOC13 = shouldrecompile_565621_839829468(code0, cfile0); if (!LOC13) goto LA14; addfiletocompile_275863_2528170400(cfile0); } LA14: ; } goto LA1; LA5: ; { NIM_BOOL LOC17; NIM_BOOL LOC18; Ropeobj180006* code0; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = pending0; if (!(LOC18)) goto LA19; LOC18 = mergerequired_532832_2760143328(m0); LA19: ; LOC17 = LOC18; if (!(LOC17)) goto LA20; LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)); LA20: ; if (!LOC17) goto LA21; mergefiles_533241_2760143328(cfile0, m0); geninitcode_564286_839829468(m0); finishtypedescriptions_537842_839829468(m0); code0 = genmodule_564491_839829468(m0, cfile0); writerope_180836_2381377266(code0, cfile0, NIM_FALSE); addfiletocompile_275863_2528170400(cfile0); } goto LA1; LA21: ; { NimStringDesc* LOC24; NIM_BOOL LOC25; LOC24 = (NimStringDesc*)0; LOC24 = toobjfile_275859_2528170400(cfilenoext0); LOC25 = (NIM_BOOL)0; LOC25 = nosexistsFile(LOC24); if (!!(LOC25)) goto LA26; addfiletocompile_275863_2528170400(cfile0); } goto LA1; LA26: ; LA1: ; addfiletolink_275872_2528170400(cfilenoext0); } N_NIMCALL(void, writeheader_565152_839829468)(Tcgen531027* m0) { Ropeobj180006* result0; Ropeobj180006* guard0; TY180507 LOC1; TY129506 LOC2; TY180507 
LOC3; TY535289 LOC13; TY180507 LOC14; result0 = getcopyright_563665_839829468((*m0).filename); memset((void*)LOC1, 0, sizeof(LOC1)); memset((void*)(&LOC2), 0, sizeof(LOC2)); nossplitFile((*m0).filename, (&LOC2)); LOC1[0] = rope_180277_2381377266(LOC2.Field1); guard0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = guard0; addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1); addinttypes_563659_839829468(&result0); generateheaders_562104_839829468(m0); generatethreadlocalstorage_540717_839829468(m0); { Tcfilesection531005 i_565174_839829468; NI res_565200_839829468; i_565174_839829468 = (Tcfilesection531005)0; res_565200_839829468 = ((NI) 1); { while (1) { Ropeobj180006* LOC7; Ropeobj180006* LOC8; if (!(res_565200_839829468 <= ((NI) 10))) goto LA6; i_565174_839829468 = ((Tcfilesection531005) (res_565200_839829468)); LOC7 = (Ropeobj180006*)0; LOC7 = gensectionstart_532015_2760143328(i_565174_839829468); add_180482_2381377266(&result0, LOC7); add_180482_2381377266(&result0, (*m0).s[(i_565174_839829468)- 0]); LOC8 = (Ropeobj180006*)0; LOC8 = gensectionend_532050_2760143328(i_565174_839829468); add_180482_2381377266(&result0, LOC8); res_565200_839829468 += ((NI) 1); } LA6: ; } } add_180482_2381377266(&result0, (*m0).s[(((Tcfilesection531005) 11))- 0]); { if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 8))&63U)))!=0)) goto LA11; add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_22)); } LA11: ; memset((void*)LOC13, 0, sizeof(LOC13)); addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = guard0; addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1); writerope_180836_2381377266(result0, (*m0).filename, NIM_FALSE); } N_NIMCALL(void, cgenwritemodules_565902_839829468)(void) { { if (!!((generatedheader_534201_839829468 == NIM_NIL))) goto 
LA3; finishmodule_565420_839829468(generatedheader_534201_839829468); } LA3: ; { while (1) { if (!(((NI) 0) < gforwardedprocscounter_531171_3723162438)) goto LA6; { Tcgen531027* m_565916_839829468; m_565916_839829468 = (Tcgen531027*)0; { NI i_565935_839829468; NI HEX3Atmp_565937_839829468; NI res_565939_839829468; i_565935_839829468 = (NI)0; HEX3Atmp_565937_839829468 = (NI)0; HEX3Atmp_565937_839829468 = (gmodules_531170_3723162438 ? (gmodules_531170_3723162438->Sup.len-1) : -1); res_565939_839829468 = ((NI) 0); { while (1) { if (!(res_565939_839829468 <= HEX3Atmp_565937_839829468)) goto LA10; i_565935_839829468 = res_565939_839829468; { if (!!((gmodules_531170_3723162438->data[i_565935_839829468] == NIM_NIL))) goto LA13; m_565916_839829468 = gmodules_531170_3723162438->data[i_565935_839829468]; { if (!!((*m_565916_839829468).Sup.fromcache)) goto LA17; finishmodule_565420_839829468(m_565916_839829468); } LA17: ; } LA13: ; res_565939_839829468 += ((NI) 1); } LA10: ; } } } } LA6: ; } { Tcgen531027* m_565917_839829468; m_565917_839829468 = (Tcgen531027*)0; { NI i_565946_839829468; NI HEX3Atmp_565948_839829468; NI res_565950_839829468; i_565946_839829468 = (NI)0; HEX3Atmp_565948_839829468 = (NI)0; HEX3Atmp_565948_839829468 = (gmodules_531170_3723162438 ? 
(gmodules_531170_3723162438->Sup.len-1) : -1); res_565950_839829468 = ((NI) 0); { while (1) { if (!(res_565950_839829468 <= HEX3Atmp_565948_839829468)) goto LA22; i_565946_839829468 = res_565950_839829468; { if (!!((gmodules_531170_3723162438->data[i_565946_839829468] == NIM_NIL))) goto LA25; m_565917_839829468 = gmodules_531170_3723162438->data[i_565946_839829468]; { if (!(*m_565917_839829468).Sup.fromcache) goto LA29; updatecachedmodule_565813_839829468(m_565917_839829468); } goto LA27; LA29: ; { writemodule_565637_839829468(m_565917_839829468, NIM_TRUE); } LA27: ; } LA25: ; res_565950_839829468 += ((NI) 1); } LA22: ; } } } writemapping_276789_2528170400(gmapping_531152_3723162438); { if (!!((generatedheader_534201_839829468 == NIM_NIL))) goto LA34; writeheader_565152_839829468(generatedheader_534201_839829468); } LA34: ; } N_NIMCALL(void, nullify_564833_839829468)(Ropeobj180006** arr0) { { Tcfilesection531005 i_564848_839829468; NI res_564853_839829468; i_564848_839829468 = (Tcfilesection531005)0; res_564853_839829468 = ((NI) 0); { while (1) { if (!(res_564853_839829468 <= ((NI) 17))) goto LA3; i_564848_839829468 = ((Tcfilesection531005) (res_564853_839829468)); unsureAsgnRef((void**) (&arr0[(i_564848_839829468)- 0]), NIM_NIL); res_564853_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, nullify_564858_839829468)(Ropeobj180006** arr0) { { NIM_CHAR i_565014_839829468; NI res_565019_839829468; i_565014_839829468 = (NIM_CHAR)0; res_565019_839829468 = ((NI) 48); { while (1) { if (!(res_565019_839829468 <= ((NI) 57))) goto LA3; i_565014_839829468 = ((NIM_CHAR) (res_565019_839829468)); unsureAsgnRef((void**) (&arr0[(((NU8)(i_565014_839829468)))- 48]), NIM_NIL); res_565019_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, resetmodule_564763_839829468)(Tcgen531027* m0) { initlinkedlist_148031_3771138726((&(*m0).headerfiles)); initintset_270885_2627731572((&(*m0).declaredprotos)); initidtable_298019_850551059((&(*m0).forwtypecache)); asgnRef((void**) 
(&(*m0).initproc), newproc_531206_3723162438(NIM_NIL, m0)); (*(*m0).initproc).options = initprocoptions_564635_839829468(m0); asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_564625_839829468(m0)); asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_564630_839829468(m0)); initnodetable_298085_850551059((&(*m0).datacache)); if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack); (*m0).typestack = (Ttypeseq294836*) newSeqRC1((&NTI294836), 0); if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs); (*m0).forwardedprocs = (Tsymseq294804*) newSeqRC1((&NTI294804), 0); asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_535596_839829468(m0)); asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_535596_839829468(m0)); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 13))&31U)))!=0)) goto LA3; (*m0).flags |= ((NU8)1)<<((((Codegenflag531025) 0))%(sizeof(NU8)*8)); } goto LA1; LA3: ; { (*m0).flags &= ~(((NU8)1) << ((((Codegenflag531025) 0)) % (sizeof(NU8)*8))); } LA1: ; nullify_564833_839829468((*m0).s); (*m0).typenodes = ((NI) 0); (*m0).nimtypes = ((NI) 0); nullify_564858_839829468((*m0).extensionloaders); (*m0).Sup.fromcache = NIM_TRUE; } N_NIMCALL(void, resetcgenmodules_565024_839829468)(void) { { Tcgen531027* m_565026_839829468; m_565026_839829468 = (Tcgen531027*)0; { NI i_565031_839829468; NI HEX3Atmp_565033_839829468; NI res_565035_839829468; i_565031_839829468 = (NI)0; HEX3Atmp_565033_839829468 = (NI)0; HEX3Atmp_565033_839829468 = (gmodules_531170_3723162438 ? 
(gmodules_531170_3723162438->Sup.len-1) : -1); res_565035_839829468 = ((NI) 0); { while (1) { if (!(res_565035_839829468 <= HEX3Atmp_565033_839829468)) goto LA4; i_565031_839829468 = res_565035_839829468; { if (!!((gmodules_531170_3723162438->data[i_565031_839829468] == NIM_NIL))) goto LA7; m_565026_839829468 = gmodules_531170_3723162438->data[i_565031_839829468]; resetmodule_564763_839829468(m_565026_839829468); } LA7: ; res_565035_839829468 += ((NI) 1); } LA4: ; } } } } NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) { nimRegisterGlobalMarker(T839829468_2); nimRegisterGlobalMarker(T839829468_3); nimRegisterGlobalMarker(T839829468_5); nimRegisterGlobalMarker(T839829468_6); nimRegisterGlobalMarker(T839829468_7); nimRegisterGlobalMarker(T839829468_8); asgnRefNoCycle((void**) (&indent_534655_839829468), rope_180277_2381377266(((NimStringDesc*) &T839829468_4))); if (nimtvdeps_540674_839829468) nimGCunrefNoCycle(nimtvdeps_540674_839829468); nimtvdeps_540674_839829468 = (Ttypeseq294836*) newSeqRC1((&NTI294836), 0); chckNil((void*)(&nimtvdeclared_540675_839829468)); genericReset((void*)(&nimtvdeclared_540675_839829468), (&NTI270030)); initintset_270885_2627731572((&nimtvdeclared_540675_839829468)); breakpointid_550860_839829468 = ((NI) 0); } NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) { }
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987-2020 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* In an incomplete RECORD_TYPE, UNION_TYPE or ENUMERAL_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) \ TYPE_LANG_SLOT_1 (TREE_CHECK4 (TYPE, RECORD_TYPE, UNION_TYPE, \ QUAL_UNION_TYPE, ENUMERAL_TYPE)) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword. 
*/ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* Record whether a type is defined inside a struct or union type. This is used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. 
*/ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) /* Set on VAR_DECLs for compound literals. */ #define C_DECL_COMPOUND_LITERAL_P(DECL) \ DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (!prototype_p (TREE_TYPE (EXP)) \ && !fndecl_built_in_p (EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) \ TYPE_LANG_SLOT_1 (FUNCTION_TYPE_CHECK (NODE)) /* For a CONSTRUCTOR, whether some initializer contains a subexpression meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR)) /* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already been folded. */ #define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP)) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original unary/binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if parenthesized), for subexpressions, and for non-constant initializers, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; /* If not NULL, the original type of an expression. 
This will differ from the type of the value field for an enum constant. The type of an enum constant is a plain integer type, but this field will be the enum type. */ tree original_type; /* The source range of this expression. This is redundant for node values that have locations, but not all node kinds have locations (e.g. constants, and references to params, locals, etc), so we stash a copy here. */ source_range src_range; /* Access to the first and last locations within the source spelling of this expression. */ location_t get_start () const { return src_range.m_start; } location_t get_finish () const { return src_range.m_finish; } location_t get_location () const { if (EXPR_HAS_LOCATION (value)) return EXPR_LOCATION (value); else return make_location (get_start (), get_start (), get_finish ()); } /* Set the value to error_mark_node whilst ensuring that src_range is initialized. */ void set_error () { value = error_mark_node; src_range.m_start = UNKNOWN_LOCATION; src_range.m_finish = UNKNOWN_LOCATION; } }; /* Type alias for struct c_expr. This allows to use the structure inside the VEC types. */ typedef struct c_expr c_expr_t; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* Likewise, with standard attributes present in the reference. */ ctsk_tagref_attrs, /* A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* Likewise, with standard attributes present in the reference. 
*/ ctsk_tagfirstref_attrs, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* Whether the expression has operands suitable for use in constant expressions. */ bool expr_const_operands; /* The specifier itself. */ tree spec; /* An expression to be evaluated before the type specifier, in the case of typeof specifiers, or NULL otherwise or if no such expression is required for a particular typeof specifier. In particular, when typeof is applied to an expression of variably modified type, that expression must be evaluated in order to determine array sizes that form part of the type, but the expression itself (as opposed to the array sizes) forms no part of the type and so needs to be recorded separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_floatn_nx, cts_fract, cts_accum, cts_auto_type }; /* This enum lists all the possible declarator specifiers, storage class or attribute that a user can write. There is at least one enumerator per possible declarator specifier in the struct c_declspecs below. It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. 
*/, cdw_storage_class /* A catch-all for a storage class */, cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_atomic, cdw_saturating, cdw_alignas, cdw_address_space, cdw_gimple, cdw_rtl, cdw_number_of_elements /* This one must always be the last enumerator. */ }; enum c_declspec_il { cdil_none, cdil_gimple, /* __GIMPLE */ cdil_gimple_cfg, /* __GIMPLE(cfg) */ cdil_gimple_ssa, /* __GIMPLE(ssa) */ cdil_rtl /* __RTL */ }; /* A sequence of declaration specifiers in C. When a new declaration specifier is added, please update the enum c_declspec_word above accordingly. */ struct c_declspecs { location_t locations[cdw_number_of_elements]; /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* Any expression to be evaluated before the type, from a typeof specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the GNU attributes and prefix standard attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* When parsing, postfix standard attributes (which appertain to the type specified by the preceding declaration specifiers, unlike prefix standard attributes which appertain to the declaration or declarations as a whole). */ tree postfix_attrs; /* The pass to start compiling a __GIMPLE or __RTL function with. */ char *gimple_or_rtl_pass; /* ENTRY BB count. */ profile_count entry_bb_count; /* The base-2 log of the greatest alignment required by an _Alignas specifier, in bytes, or -1 if no such specifiers with nonzero alignment. 
*/ int align_log; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* For the _FloatN and _FloatNx declspec, this stores the index into the floatn_nx_types array. */ int floatn_nx_idx; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8; /* The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD (c_typespec_kind) typespec_kind : 4; ENUM_BITFIELD (c_declspec_il) declspec_il : 3; /* Whether any expressions in typeof specifiers may appear in constant expressions. */ BOOL_BITFIELD expr_const_operands : 1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether any declaration specifiers other than standard attributes have been seen at all. If only standard attributes have been seen, this is an attribute-declaration. */ BOOL_BITFIELD non_std_attrs_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p : 1; /* Whether "long" was specified. 
*/ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "_Noreturn" was speciied. */ BOOL_BITFIELD noreturn_p : 1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p : 1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p : 1; /* Whether "_Sat" was specified. */ BOOL_BITFIELD saturating_p : 1; /* Whether any alignment specifier (even with zero alignment) was specified. */ BOOL_BITFIELD alignas_p : 1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; struct c_arg_tag { /* The argument name. */ tree id; /* The type of the argument. */ tree type; }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec<c_arg_tag, va_gc> *tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. 
*/ tree others; /* A compound expression of VLA sizes from the parameters, or NULL. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; union { /* For identifiers. */ struct { /* An IDENTIFIER_NODE, or NULL_TREE if an abstract declarator. */ tree id; /* Any attributes (which apply to the declaration rather than to the type described by the outer declarators). */ tree attrs; } id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; /* The location of the parameter. */ location_t loc; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* While defining an enum type, this is 1 plus the last enumerator constant value. 
*/ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* Identifier with internal linkage used in function that may be an inline definition (i.e., file-scope static). */ csi_internal, /* Modifiable object with static storage duration defined in function that may be an inline definition (i.e., local static). */ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init (void); extern bool c_keyword_starts_typename (enum rid keyword); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; class c_struct_parse_info; extern struct obstack parser_obstack; /* Set to IN_ITERATION_STMT if parsing an iteration-statement, to IN_OMP_BLOCK if parsing OpenMP structured block and IN_OMP_FOR if parsing OpenMP loop. If parsing a switch statement, this is bitwise ORed with IN_SWITCH_STMT, unless parsing an iteration-statement, OpenMP block or loop within that switch. 
*/ #define IN_SWITCH_STMT 1 #define IN_ITERATION_STMT 2 #define IN_OMP_BLOCK 4 #define IN_OMP_FOR 8 #define IN_OBJC_FOREACH 16 extern unsigned char in_statement; extern bool switch_statement_break_seen_p; extern bool global_bindings_p (void); extern tree pushdecl (tree); extern void push_scope (void); extern tree pop_scope (void); extern void c_bindings_start_stmt_expr (struct c_spot_bindings *); extern void c_bindings_end_stmt_expr (struct c_spot_bindings *); extern void record_inline_static (location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing (void); extern void c_print_identifier (FILE *, tree, int); extern int quals_from_declspecs (const struct c_declspecs *); extern struct c_declarator *build_array_declarator (location_t, tree, struct c_declspecs *, bool, bool); extern tree build_enumerator (location_t, location_t, struct c_enum_contents *, tree, tree); extern tree check_for_loop_decls (location_t, bool); extern void mark_forward_parm_decls (void); extern void declare_parm_level (void); extern void undeclared_variable (location_t, tree); extern tree lookup_label_for_goto (location_t, tree); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern struct c_spot_bindings *c_get_switch_bindings (void); extern void c_release_switch_bindings (struct c_spot_bindings *); extern bool c_check_switch_jump_warnings (struct c_spot_bindings *, location_t, location_t); extern void finish_decl (tree, location_t, tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (location_t = input_location); extern tree finish_struct (location_t, tree, tree, tree, class c_struct_parse_info *); extern tree c_simulate_enum_decl (location_t, const char *, vec<string_int_pair>); extern struct c_arg_info *build_arg_info (void); extern struct c_arg_info *get_parm_info (bool, tree); extern tree grokfield (location_t, struct c_declarator *, struct c_declspecs *, tree, tree *); extern tree 
groktypename (struct c_type_name *, tree *, bool *); extern tree grokparm (const struct c_parm *, tree *); extern tree implicitly_declare (location_t, tree); extern void keep_next_level (void); extern void pending_xref_error (void); extern void c_push_function_context (void); extern void c_pop_function_context (void); extern void push_parm_decl (const struct c_parm *, tree *); extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, struct c_declarator *); extern tree c_builtin_function (tree); extern tree c_builtin_function_ext_scope (tree); extern tree c_simulate_builtin_function_decl (tree); extern void c_warn_unused_attributes (tree); extern tree c_warn_type_attributes (tree); extern void shadow_tag (const struct c_declspecs *); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (location_t, struct c_enum_contents *, tree); extern bool start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree, location_t * = NULL); extern tree start_struct (location_t, enum tree_code, tree, class c_struct_parse_info **); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern void temp_store_parm_decls (tree, tree); extern void temp_pop_parm_decls (void); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree, bool, tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *, location_t); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct 
c_declspecs *declspecs_add_qual (location_t, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_type (location_t, struct c_declspecs *, struct c_typespec); extern struct c_declspecs *declspecs_add_scspec (location_t, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (location_t, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_addrspace (location_t, struct c_declspecs *, addr_space_t); extern struct c_declspecs *declspecs_add_alignas (location_t, struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern bool c_warn_unused_global_decl (const_tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); extern alias_set_type c_get_alias_set (tree); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern bool c_in_omp_for; extern tree c_last_sizeof_arg; extern location_t c_last_sizeof_loc; extern struct c_switch *c_switch_stack; extern tree c_objc_common_truthvalue_conversion (location_t, tree); extern tree require_complete_type (location_t, tree); extern bool same_translation_unit_p (const_tree, const_tree); extern int comptypes (tree, tree); extern int comptypes_check_different_types (tree, tree, bool *); extern bool c_vla_type_p (const_tree); extern bool c_mark_addressable (tree, bool = false); extern void c_incomplete_type_error (location_t, const_tree, const_tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion (location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr, bool, bool); extern tree decl_constant_value_1 (tree, bool); extern void mark_exp_read (tree); extern tree 
composite_type (tree, tree); extern tree build_component_ref (location_t, tree, tree, location_t); extern tree build_array_ref (location_t, tree, tree); extern tree build_external_ref (location_t, tree, bool, tree *); extern void pop_maybe_used (bool); extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *); extern struct c_expr parser_build_unary_op (location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op (location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr (location_t, tree, bool, tree, tree, location_t, tree, tree, location_t); extern tree build_compound_expr (location_t, tree, tree); extern tree c_cast_expr (location_t, struct c_type_name *, tree); extern tree build_c_cast (location_t, tree, tree); extern void store_init_value (location_t, tree, tree, tree); extern void maybe_warn_string_init (location_t, tree, struct c_expr); extern void start_init (tree, tree, int, rich_location *); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void finish_implicit_inits (location_t, struct obstack *); extern void push_init_level (location_t, int, struct obstack *); extern struct c_expr pop_init_level (location_t, int, struct obstack *, location_t); extern void set_init_index (location_t, tree, tree, struct obstack *); extern void set_init_label (location_t, tree, location_t, struct obstack *); extern void process_init_element (location_t, struct c_expr, bool, struct obstack *); extern tree build_compound_literal (location_t, tree, tree, bool, unsigned int); extern void check_compound_literal_type (location_t, struct c_type_name *); extern tree c_start_switch (location_t, location_t, tree, bool); extern void c_finish_switch (tree, tree); extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool, bool); extern tree build_asm_stmt (bool, tree); extern int 
c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt (location_t, tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree); extern void c_finish_loop (location_t, location_t, tree, location_t, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (location_t, tree); extern tree c_process_expr_stmt (location_t, tree); extern tree c_finish_expr_stmt (location_t, tree); extern tree c_finish_return (location_t, tree, tree); extern tree c_finish_bc_stmt (location_t, tree, bool); extern tree c_finish_goto_label (location_t, tree); extern tree c_finish_goto_ptr (location_t, tree); extern tree c_expr_to_decl (tree, bool *, bool *); extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree); extern tree c_finish_oacc_data (location_t, tree, tree); extern tree c_finish_oacc_host_data (location_t, tree, tree); extern tree c_begin_omp_parallel (void); extern tree c_finish_omp_parallel (location_t, tree, tree); extern tree c_begin_omp_task (void); extern tree c_finish_omp_task (location_t, tree, tree); extern void c_finish_omp_cancel (location_t, tree); extern void c_finish_omp_cancellation_point (location_t, tree); extern tree c_finish_omp_clauses (tree, enum c_omp_region_type); extern tree c_build_va_arg (location_t, tree, location_t, tree); extern tree c_finish_transaction (location_t, tree, int); extern bool c_tree_equal (tree, tree); extern tree c_build_function_call_vec (location_t, vec<location_t>, tree, vec<tree, va_gc> *, vec<tree, va_gc> *); extern tree c_omp_clause_copy_ctor (tree, tree, tree); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. 
*/ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. */ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. */ typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier); extern c_binding_oracle_function *c_binding_oracle; extern void c_finish_incomplete_decl (tree); extern tree c_omp_reduction_id (enum tree_code, tree); extern tree c_omp_reduction_decl (tree); extern tree c_omp_reduction_lookup (tree, tree); extern tree c_check_omp_declare_reduction_r (tree *, int *, void *); extern bool c_check_in_current_scope (tree); extern void c_pushtag (location_t, tree, tree); extern void c_bind (location_t, tree, bool); extern bool tag_exists_p (enum tree_code, tree); /* In c-errors.c */ extern bool pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern bool pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern bool pedwarn_c11 (location_t, int opt, const char *, ...) 
ATTRIBUTE_GCC_DIAG(3,4); extern void set_c_expr_source_range (c_expr *expr, location_t start, location_t finish); extern void set_c_expr_source_range (c_expr *expr, source_range src_range); /* In c-fold.c */ extern vec<tree> incomplete_record_decls; #if CHECKING_P namespace selftest { extern void run_c_tests (void); } // namespace selftest #endif /* #if CHECKING_P */ #endif /* ! GCC_C_TREE_H */
vednnConvolutionForward.c
#include "vednnConvolutionForward.h"
#include "vednn-def.h"
#include <stdint.h>
#include <assert.h>
#include <stdio.h>

#ifdef __cplusplus
extern "C" { //}
#endif

/** Default minibatch-threading wrapper: split the batch dimension across
 * OpenMP threads and run the single-impl kernel \c pFunc on each slice.
 *
 * Without OpenMP (or with a single thread / batch of 1) this degenerates to a
 * plain call of \c pFunc.  With OpenMP, \c num_threads is clamped so that
 * every spawned thread owns at least one batch image (see comment below for
 * why an idle thread would be dangerous).
 */
inline vednnError_t vednnConvolutionForward_mb_threads(
    vednnConvForward_t pFunc,
    VEDNN_CONVFWD_ARGS )
{
#ifndef VEDNN_USE_OPENMP
  return pFunc(VEDNN_CONVFWD_ARGS_LIST);
#else
  int64_t allBatch = pParamIn->batch; // check as in vednnx
  if (allBatch == 1 || __vednn_omp_num_threads == 1) {
    // Nothing to split: run the kernel directly on the calling thread.
    return pFunc(VEDNN_CONVFWD_ARGS_LIST);
  }else{
    //vednnError_t rc = VEDNN_SUCCESS ;
    int rc = VEDNN_SUCCESS ; // int (not enum) so |= is well-defined; cast back on return
    //#pragma omp parallel reduction(|:rc)
    // The above reduction form is permitted if pFunc has no omp barriers,
    // but DEADLY (hang) if pFunc has an omp barrier!
    // Why? a thread with myBatch==0 would NOT reach the synchronization
    // points inside pFunc.
    // Ex. pFunc allocates a shared read-only scratchpad that expects
    // **all** threads to synchronize on the size needed.
    // So instead set num_threads so myBatch>0 always.
    int par = omp_get_max_threads();
    if (allBatch < par) par = allBatch; // avoiding myBatch==0 is important.
#pragma omp parallel num_threads(par)
    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;
      // Even split of allBatch; the first `remain` threads take one extra image.
      int64_t nBatch = allBatch / nthreads ;
      int64_t remain = allBatch % nthreads ;
      int64_t batchBegin = nBatch * threadid + ( threadid < remain ? threadid : remain ) ;
      int64_t myBatch = nBatch + ( threadid < remain ? 1 : 0 ) ;
      assert(myBatch > 0); // NEW (bugfix) -- guaranteed by the num_threads clamp above
      //if( myBatch == 0 ) {
      //  rc |= VEDNN_SUCCESS ;
      //}else{
      // Thread-local copies of the tensor descriptors, shrunk to this slice.
      vednnTensorParam_t _pParamIn = *pParamIn ;
      _pParamIn.batch = myBatch ;
      vednnTensorParam_t _pParamOut = *pParamOut ;
      _pParamOut.batch = myBatch ;
      float* _pDataIn = ((float *)pDataIn) + batchBegin * pParamIn->channel * pParamIn->height * pParamIn->width ;
      float* _pDataOut = ((float *)pDataOut) + batchBegin * pParamOut->channel * pParamOut->height * pParamOut->width ;
      // NOTE(review): rc is shared and updated here without a reduction or
      // atomic -- a data race if more than one slice fails; consider
      // `#pragma omp atomic` (benign when every slice returns VEDNN_SUCCESS==0).
      rc |= pFunc(&_pParamIn, (void*)_pDataIn, pParamKernel, pDataKernel, pParamBias, pDataBias, pParamConv, &_pParamOut, (void*) _pDataOut) ;
      //}
    }
    return (vednnError_t)(rc<3? rc: 1); // 3 (== 1|2, an OR of two error codes) is not an allowed enum value
  }
#endif
}

/* ----------------------------------------------------------------------- */
/** Weak Library symbol: override to test improved strategies.
 *
 * \return rc==VEDNN_SUCCESS and pFunc non-null,
 * or rc==VEDNN_ERROR_INVALID_PARAM
 */
vednnCnvFwdChoice_t vednnConvolutionForwardChoice( VEDNN_CONVFWD_API_ARGS )
{
  // decision tree will set rc, pFunc, impl and wrapper type
  vednnError_t rc = VEDNN_SUCCESS;
  vednnConvForward_t pFunc = NULL; // internal function pointer
  char const* impl = "unset"; // internal impl name (for messages or ftrace)
  int mb_threads = 1; // threads-wrapper type (1 = wrap pFunc with minibatch threading)
  // TODO: harmonize impl name with libvednnx (maybe via vednn.h API mods)
  // A quick initial INVALID_PARAM check...
  // Reject unsupported filter layouts up front.
  switch( pParamKernel->layout ) {
  case VEDNN_FILTER_LAYOUT_NCHW : break ;
  case VEDNN_FILTER_LAYOUT_HWCN :
    if( pParamConv->group > 1 ) {
      fprintf(stderr, "[VEDNN ERROR] VEDNN does not support grouped convolution with filter_hwcn\n") ;
      rc = VEDNN_ERROR_INVALID_PARAM ;
    }
    break ;
  default :
    fprintf(stderr, "[VEDNN ERROR] Unknown Filter Layout %d\n", pParamKernel->layout) ;
    rc = VEDNN_ERROR_INVALID_PARAM ;
  }
  // NOTE: OMPWRAP and NOWRAP are CODE-BLOCK macros, not statements.
  // Each one selects an impl and `break`s out of the do{}while(0) decision tree.
  // Set normal ||ism over minibatch, and exit decision tree
#define OMPWRAP( IMPL ) \
  { \
    impl = "mb-" #IMPL; \
    pFunc = vednnConvolutionForward_direct_##IMPL; \
    /*mb_threads = 1; default*/ \
    break; \
  }
  // pFunc handles ||ism internally, and exit decision tree
#define NOWRAP( IMPL ) \
  { \
    impl = #IMPL; \
    pFunc = vednnConvolutionForward_direct_##IMPL; \
    mb_threads = 0; \
    break; \
  }
  if (rc == VEDNN_SUCCESS) do { // allow 'break' to easily exit after any pFunc or rc is set.
    if (algo == VEDNN_CONV_ALGORITHM_DIRECT) {
      // Geometry predicates used throughout the tree:
      //   DIL/PAD/STR/KER(N): dilation/pad/stride/kernel equal N in both dims;
      //   IWU/OWU(N): input/output width <= N;
      //   ICoGU/OCoGU(N): input/output channels-per-group <= N.
#define DIL(N) (pParamConv->dilationHeight == (N) && pParamConv->dilationWidth == (N))
#define PAD(N) (pParamConv->padHeight == (N) && pParamConv->padWidth == (N))
#define STR(N) (pParamConv->strideHeight == (N) && pParamConv->strideWidth == (N))
#define KER(N) (pParamKernel->width == (N) && pParamKernel->height == (N))
#define IWU(N) (pParamIn->width <= (N))
#define OWU(N) (pParamOut->width <= (N))
#define ICoGU(N) (pParamIn->channel / pParamConv->group <= (N))
#define OCoGU(N) (pParamOut->channel / pParamConv->group <= (N))
      if ((pParamOut->height * pParamOut->width <= 16)
          || ((pParamOut->height * pParamOut->width < 64)
              && (pParamOut->height * pParamOut->width < pParamIn->channel)
              // ... !(DIL(1) && STR(1) && KER(1)) ???
              && ( pParamConv->dilationHeight | pParamConv->dilationWidth
                   | pParamConv->strideHeight | pParamConv->strideWidth
                   | pParamKernel->height | pParamKernel->width) != 1 ) )
      { // small images may have a fast vecC (vectorize-over-channels) impl
        if (KER(3) && DIL(1) && STR(1) && PAD(1)) OMPWRAP(vecC_dil1_str1_pad1_ker3)//;
        else if (KER(1) && DIL(1) && PAD(0)
                 && pParamOut->height == (pParamIn->height - pParamKernel->height) / pParamConv->strideHeight + 1
                 && pParamOut->width == (pParamIn->width - pParamKernel->width) / pParamConv->strideWidth + 1) {
          if (ICoGU(1024)) OMPWRAP(vecC_dil1_pad0_ker1_cU1024)//;
          else OMPWRAP(vecC_dil1_pad0_ker1)//;
        }
        OMPWRAP(vecC)//;
      }
#ifdef VEDNN_ALT_PARALLEL
      // resnext branch : AGGRESSIVE use of gemm for all stride > 1 ?
      if (!STR(1)) {
        // if (STR(2) && DIL(1) && PAD(1) && OWU(128)) {
        //   if (KER(3)) OMPWRAP(dil1_str2_pad1_ker3_owU128)//;
        //   if (KER(4)) OMPWRAP(dil1_str2_pad1_ker4_owU128)//;
        // }
        // try using gemm in most cases with stride > 1
        if(OCoGU(256) && OWU(128)) NOWRAP(owU128_T)//;
        else NOWRAP(gemm)//;
      }
#endif
      if (STR(1) && DIL(1)
          && pParamIn->height == pParamOut->height
          && pParamIn->width == pParamOut->width ) { // d1s1pS: dil 1, stride 1, "same" padding
        if (KER(1)) {
#ifdef VEDNN_ALT_PARALLEL // new: CHECKME
          if(OWU(128)) NOWRAP(dil1_str1_pad0_ker1_T)//;
          //else OMPWRAP(dil1_str1_pad0_ker1)//;
          NOWRAP(gemm) // always faster?
#else
          OMPWRAP(dil1_str1_pad0_ker1)//;
#endif
        }
        if (KER(3)){ // d1s1pSk3
          if (pParamIn->channel == pParamConv->group){ // aka inputChannelGroup==1
            if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker3_c1_owU128)//;
            OMPWRAP(dil1_str1_padsame_ker3_c1)//;
          }
#ifdef VEDNN_ALT_PARALLEL
          if (pParamIn->batch < 8) { // checkme!
            if (pParamKernel->inChannel % 1024 == 0) // really!?
              NOWRAP(dil1_str1_padsame_ker3_c1024x_T)//;
            NOWRAP(dil1_str1_padsame_ker3_T)//;
          }
#else
          OMPWRAP(dil1_str1_padsame_ker3) // is this ever faster?
#endif
        }
        if (KER(5)) { // d1s1pSk5
          if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker5_owU128)//;
          //
          // XXX the following change 01-29-2021 "mem error fix"
          // produces wrong output and even sometimes memory corruption.
          // Removed (perhaps revert to the memory error version?)
          //
          //else if(pParamIn->height >= 5) OMPWRAP(dil1_str1_padsame_ker5)//;
          //
          // The following is a much slower substitute, gemm seems faster.
          //OMPWRAP(dil1_str1_padsame)//;
          //
          // uninvestigated (sometimes slightly faster):
          if (pParamIn->batch >= 4) OMPWRAP(gemm);
          NOWRAP(gemm); // this seems to do very well (often 25% faster)
          //
        }
        if (KER(2)) OMPWRAP(dil1_str1_padsame_ker2)//;
        OMPWRAP(dil1_str1_padsame)//;
      } // end d1s1pS
      if (DIL(1) && PAD(0)
          && pParamOut->height == (pParamIn->height - pParamKernel->height) / pParamConv->strideHeight + 1
          && pParamOut->width == (pParamIn->width - pParamKernel->width) / pParamConv->strideWidth + 1 )
      { // d1p0 and output height/width have the expected no-padding value
        if (STR(1)) { // d1s1p0
          if (KER(3)
              // && IWU(256) // XXX original concords with impl name
              // XXX but actually it seems the "correctly able to run" condition is
              // the one below (though often the ioaligned impl may not be fastest,
              // even though the code looks good; with many channels, often 2x slower).
              && OWU(256)
              && (pParamIn->width & 0x1) == 0 && (((uint64_t)pDataIn) & 0x7) == 0
              && (pParamOut->width & 0x1) == 0 && (((uint64_t)pDataOut) & 0x7) == 0 )
            OMPWRAP(dil1_str1_pad0_ker3_iw2XU256_ow2X_ioaligned)//;
          if (KER(4) && IWU(256)) OMPWRAP(dil1_str1_pad0_ker4_iwU256)//;
          if (OWU(128)) OMPWRAP(dil1_str1_pad0_owU128)//;
          OMPWRAP(dil1_str1_pad0)//;
        } else if (KER(1)) { // d1s>1p0k1
          if (OWU(128)) OMPWRAP(dil1_pad0_owU128_ker1)//;
          OMPWRAP(dil1_pad0_ker1)//;
        }
        { // d1s>1p0k>1
          // todo: this part of tree seems to target d1p0owU128, mostly
          if (OWU(128)){
#ifdef VEDNN_ALT_PARALLEL
            // XXX 3 possibilities:
            // OMPWRAP(dil1_pad0_owU128)//;
            // NOWRAP(owU128_T)//;
            // NOWRAP(gemm)//;
            if(OCoGU(256)) NOWRAP(owU128_T) // NEW mb+g threading
            NOWRAP(gemm) // NEW: is this case always faster than dil1_pad0_owU128?
#else
            OMPWRAP(dil1_pad0_owU128)//;
#endif
          }else{
#ifdef VEDNN_ALT_PARALLEL
            NOWRAP(gemm) // always faster than dil1_pad0 ?
#else
            OMPWRAP(dil1_pad0)//;
#endif
          }
        }
      } // end d1p0 and oh expected value
      if (STR(2) && DIL(1) && PAD(1) && OWU(128)) {
        if (KER(3)) OMPWRAP(dil1_str2_pad1_ker3_owU128)//;
        if (KER(4)) OMPWRAP(dil1_str2_pad1_ker4_owU128)//;
      }
      // generic fallbacks
      if (OWU(128)) OMPWRAP(owU128)//;
      OMPWRAP(default)//;
    }
  }while(0);
  // Decision tree has set impl, pFunc and mb_threads [hopefully]
  if (pFunc == NULL) rc = VEDNN_ERROR_INVALID_PARAM;
  vednnCnvFwdChoice_t ret = { rc, impl, pFunc, mb_threads };
  return ret;
}
#undef OCoGU
#undef ICoGU
#undef OWU
#undef IWU
#undef KER
#undef STR
#undef PAD
#undef DIL
#undef OMPWRAP
// NOTE(review): NOWRAP is #define'd unconditionally above but only
// #undef'ed under VEDNN_ALT_PARALLEL -- confirm this guard is intended.
#ifdef VEDNN_ALT_PARALLEL
//#undef ALT_RET
#undef NOWRAP
#endif

/* ----------------------------------------------------------------------- */
/** \b with the bias arguments.
 *
 * This implementation is non-gemm, no-intrinsics ncc code.
 * Surprisingly, it is the fastest impl in some cases!
*/ vednnError_t vednnConvolutionForwardAddBias( VEDNN_CONVFWD_API_ARGS ) { // run the decision tree vednnCnvFwdChoice_t const c = vednnConvolutionForwardChoice(VEDNN_CONVFWD_API_ARGS_LIST); vednnError_t rc = c.rc; // initial value only if (rc == VEDNN_SUCCESS) { // // debug... //fprintf(stderr, " cnvFwd-def=%s\n", c.impl); fflush(stderr); assert( c.pFunc != NULL ); // ftrace according to compile flags // Consider changing impl to reflect bias XXX FTRACE_BEGIN(c.impl); // impl likely differs from vednnConvolutionLists.c name // run with or without threading over minibatch if (c.mb_threads) { // call with default conv fwd ||ism wrapper rc = vednnConvolutionForward_mb_threads(c.pFunc, VEDNN_CONVFWD_ARGS_LIST); }else{ // call without any threading wrapper rc = c.pFunc(VEDNN_CONVFWD_ARGS_LIST); } FTRACE_END(c.impl); // note different from src/wrap vednnx extensions :( } return rc; } /* ----------------------------------------------------------------------- */ /** \b without the bias arguments (auto-supplying NULL for bias args). */ vednnError_t vednnConvolutionForward( const vednnTensorParam_t *pParamIn, const void *pDataIn, const vednnFilterParam_t *pParamKernel, const void *pDataKernel, const vednnTensorParam_t *pParamOut, void *pDataOut, const vednnConvolutionParam_t *pParamConv, vednnConvolutionAlgorithm_t algo ) { return vednnConvolutionForwardAddBias(pParamIn, pDataIn, pParamKernel, pDataKernel, NULL, NULL, pParamOut, pDataOut, pParamConv, algo ); } #ifdef __cplusplus }//extern "C" #endif // vim: et ts=2 sw=2 cindent cino=+4s,^l0,\:s syntax=cpp.doxygen
pmem-streams.c
#include<stdio.h> #include<stdlib.h> #include<time.h> #include<string.h> #include<libpmem.h> #include<omp.h> #include"utils.h" #define ARRAY_SIZE 100000000 #define MB 1048576 #define REPEATS 10 void copy(double *, double *, long int); void scale(double *, double *, double, long int); void add(double *, double *, double *, long int); void triadd(double *, double *, double *, double, long int); void initialise(double *, double *, double *, long int); int main(int argc, char *argv[]){ struct timespec start, end; char *path; char title[100] = ""; double *a, *b, *c; char *pmemaddr = NULL; long int array_size, bytes; int repeats; int i; int array_element_size; int is_pmem; size_t mapped_len; int num_threads; if(argc != 4){ array_size = ARRAY_SIZE; repeats = REPEATS; path = ""; }else{ array_size = atoi(argv[1]); repeats = atoi(argv[2]); path = argv[3]; } a = malloc(sizeof(double)*array_size); b = malloc(sizeof(double)*array_size); c = malloc(sizeof(double)*array_size); array_element_size = sizeof(a[0]); printf("Using an array of %ld doubles (%ld MB) for experiments\n",array_size,array_size*array_element_size/MB); #pragma omp parallel shared(num_threads) { num_threads = omp_get_num_threads(); } printf("Running on %d threads\n", num_threads); printf("Memory test\n"); initialise(a,b,c,array_size); clock_gettime(CLOCK_MONOTONIC, &start); for(i=0; i<repeats; i++){ copy(a,b,array_size); } clock_gettime(CLOCK_MONOTONIC, &end); strcpy(title, "Copy"); elapsed_time_bw_hr(start, end, repeats, (2*array_size*array_element_size)/MB, title); clock_gettime(CLOCK_MONOTONIC, &start); for(i=0; i<repeats; i++){ scale(a,c,2.4,array_size); } clock_gettime(CLOCK_MONOTONIC, &end); strcpy(title, "Scale"); elapsed_time_bw_hr(start, end, repeats, (2*array_size*array_element_size)/MB, title); clock_gettime(CLOCK_MONOTONIC, &start); for(i=0; i<repeats; i++){ add(b,a,c,array_size); } clock_gettime(CLOCK_MONOTONIC, &end); strcpy(title, "Add"); elapsed_time_bw_hr(start, end, repeats, 
(3*array_size*array_element_size)/MB, title); clock_gettime(CLOCK_MONOTONIC, &start); for(i=0; i<repeats; i++){ triadd(a,b,c,2.4,array_size); } clock_gettime(CLOCK_MONOTONIC, &end); strcpy(title, "Triadd"); elapsed_time_bw_hr(start, end, repeats, (3*array_size*array_element_size)/MB, title); free(a); free(b); free(c); printf("PMem test\n"); sprintf(path+strlen(path), "pstream_test_file"); if ((pmemaddr = pmem_map_file(path, array_size*array_element_size*3, PMEM_FILE_CREATE|PMEM_FILE_EXCL, 0666, &mapped_len, &is_pmem)) == NULL) { perror("pmem_map_file"); fprintf(stderr, "Failed to pmem_map_file for filename:%s.\n", path); exit(-100); } printf("Using file %s for pmem\n",path); a = pmemaddr; b = pmemaddr + array_size*array_element_size; c = pmemaddr + array_size*array_element_size*2; initialise(a,b,c,array_size); clock_gettime(CLOCK_MONOTONIC, &start); for(i=0; i<repeats; i++){ copy(a,b,array_size); pmem_persist(pmemaddr, array_size*array_element_size); } clock_gettime(CLOCK_MONOTONIC, &end); strcpy(title, "Copy"); elapsed_time_bw_hr(start, end, repeats, (2*array_size*array_element_size)/MB, title); clock_gettime(CLOCK_MONOTONIC, &start); for(i=0; i<repeats; i++){ scale(a,c,2.4,array_size); } clock_gettime(CLOCK_MONOTONIC, &end); strcpy(title, "Scale"); elapsed_time_bw_hr(start, end, repeats, (2*array_size*array_element_size)/MB, title); clock_gettime(CLOCK_MONOTONIC, &start); for(i=0; i<repeats; i++){ add(b,a,c,array_size); } clock_gettime(CLOCK_MONOTONIC, &end); strcpy(title, "Add"); elapsed_time_bw_hr(start, end, repeats, (3*array_size*array_element_size)/MB, title); clock_gettime(CLOCK_MONOTONIC, &start); for(i=0; i<repeats; i++){ triadd(a,b,c,2.4,array_size); } clock_gettime(CLOCK_MONOTONIC, &end); strcpy(title, "Triadd"); elapsed_time_bw_hr(start, end, repeats, (3*array_size*array_element_size)/MB, title); pmem_persist(pmemaddr, mapped_len); pmem_unmap(pmemaddr, mapped_len); return 0; } void initialise(double *a, double *b, double *c, long int array_size){ int 
j; #pragma omp parallel for for (j=0; j<array_size; j++){ a[j] = 0.0; b[j] = 2.0; c[j] = 1.0; } return; } void copy(double *a, double *b, long int array_size){ int j; #pragma omp parallel for for (j=0; j<array_size; j++){ b[j] = a[j]; } return; } void scale(double *a, double *b, double scalar, long int array_size){ int j; #pragma omp parallel for for (j=0; j<array_size; j++){ b[j] = a[j]*scalar; } return; } void add(double *a, double *b, double *c, long int array_size){ int j; #pragma omp parallel for for (j=0; j<array_size; j++){ c[j] = a[j]+b[j]; } return; } void triadd(double *a, double *b, double *c, double scalar, long int array_size){ int j; #pragma omp parallel for for (j=0; j<array_size; j++){ c[j] = a[j]+b[j]*scalar; } return; }
DRB045-doall1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdlib.h> /* Simplest one dimension array computation */ int a[100]; int main() { int i; #pragma omp parallel for private(i ) for (i=0;i<100;i++) a[i]=i; #pragma omp parallel for private(i ) for (i=0;i<100;i++) a[i]=a[i]+1; for (i=0;i<100;i++) printf("%d\n",a[i]); return 0; }
pr34964.c
/* PR c++/34964 */ /* { dg-do compile } */ /* { dg-options "-fopenmp" } */ char x[] = 0; /* { dg-error "invalid initializer" } */ #pragma omp threadprivate (x)
feature_group.h
/*! * Copyright (c) 2017 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for * license information. */ #ifndef LIGHTGBM_FEATURE_GROUP_H_ #define LIGHTGBM_FEATURE_GROUP_H_ #include <LightGBM/bin.h> #include <LightGBM/meta.h> #include <LightGBM/utils/random.h> #include <cstdio> #include <memory> #include <vector> namespace LightGBM { class Dataset; class DatasetLoader; struct TrainingShareStates; class MultiValBinWrapper; /*! \brief Using to store data and providing some operations on one feature * group*/ class FeatureGroup { public: friend Dataset; friend DatasetLoader; friend TrainingShareStates; friend MultiValBinWrapper; /*! * \brief Constructor * \param num_feature number of features of this group * \param bin_mappers Bin mapper for features * \param num_data Total number of data * \param is_enable_sparse True if enable sparse feature */ FeatureGroup(int num_feature, int8_t is_multi_val, std::vector<std::unique_ptr<BinMapper>>* bin_mappers, data_size_t num_data) : num_feature_(num_feature), is_multi_val_(is_multi_val > 0), is_sparse_(false) { CHECK_EQ(static_cast<int>(bin_mappers->size()), num_feature); auto& ref_bin_mappers = *bin_mappers; double sum_sparse_rate = 0.0f; for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(ref_bin_mappers[i].release()); sum_sparse_rate += bin_mappers_.back()->sparse_rate(); } sum_sparse_rate /= num_feature_; int offset = 1; is_dense_multi_val_ = false; if (sum_sparse_rate < MultiValBin::multi_val_bin_sparse_threshold && is_multi_val_) { // use dense multi val bin offset = 0; is_dense_multi_val_ = true; } // use bin at zero to store most_freq_bin only when not using dense multi val bin num_total_bin_ = offset; bin_offsets_.emplace_back(num_total_bin_); for (int i = 0; i < num_feature_; ++i) { auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= offset; } num_total_bin_ += num_bin; 
bin_offsets_.emplace_back(num_total_bin_); } CreateBinData(num_data, is_multi_val_, true, false); } FeatureGroup(const FeatureGroup& other, int num_data) { num_feature_ = other.num_feature_; is_multi_val_ = other.is_multi_val_; is_dense_multi_val_ = other.is_dense_multi_val_; is_sparse_ = other.is_sparse_; num_total_bin_ = other.num_total_bin_; bin_offsets_ = other.bin_offsets_; bin_mappers_.reserve(other.bin_mappers_.size()); for (auto& bin_mapper : other.bin_mappers_) { bin_mappers_.emplace_back(new BinMapper(*bin_mapper)); } CreateBinData(num_data, is_multi_val_, !is_sparse_, is_sparse_); } FeatureGroup(std::vector<std::unique_ptr<BinMapper>>* bin_mappers, data_size_t num_data) : num_feature_(1), is_multi_val_(false) { CHECK_EQ(static_cast<int>(bin_mappers->size()), 1); // use bin at zero to store default_bin num_total_bin_ = 1; is_dense_multi_val_ = false; bin_offsets_.emplace_back(num_total_bin_); auto& ref_bin_mappers = *bin_mappers; for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(ref_bin_mappers[i].release()); auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= 1; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); } CreateBinData(num_data, false, false, false); } /*! 
* \brief Constructor from memory * \param memory Pointer of memory * \param num_all_data Number of global data * \param local_used_indices Local used indices, empty means using all data */ FeatureGroup(const void* memory, data_size_t num_all_data, const std::vector<data_size_t>& local_used_indices) { const char* memory_ptr = reinterpret_cast<const char*>(memory); // get is_sparse is_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr)); memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_multi_val_)); is_dense_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr)); memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_dense_multi_val_)); is_sparse_ = *(reinterpret_cast<const bool*>(memory_ptr)); memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_sparse_)); num_feature_ = *(reinterpret_cast<const int*>(memory_ptr)); memory_ptr += VirtualFileWriter::AlignedSize(sizeof(num_feature_)); // get bin mapper bin_mappers_.clear(); bin_offsets_.clear(); // start from 1, due to need to store zero bin in this slot num_total_bin_ = 1; bin_offsets_.emplace_back(num_total_bin_); for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(new BinMapper(memory_ptr)); auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= 1; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); memory_ptr += bin_mappers_[i]->SizesInByte(); } data_size_t num_data = num_all_data; if (!local_used_indices.empty()) { num_data = static_cast<data_size_t>(local_used_indices.size()); } if (is_multi_val_) { for (int i = 0; i < num_feature_; ++i) { int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 
0 : 1; if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) { multi_bin_data_.emplace_back(Bin::CreateSparseBin( num_data, bin_mappers_[i]->num_bin() + addi)); } else { multi_bin_data_.emplace_back( Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi)); } multi_bin_data_.back()->LoadFromMemory(memory_ptr, local_used_indices); memory_ptr += multi_bin_data_.back()->SizesInByte(); } } else { if (is_sparse_) { bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_)); } else { bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_)); } // get bin data bin_data_->LoadFromMemory(memory_ptr, local_used_indices); } } /*! \brief Destructor */ ~FeatureGroup() {} /*! * \brief Push one record, will auto convert to bin and push to bin data * \param tid Thread id * \param idx Index of record * \param value feature value of record */ inline void PushData(int tid, int sub_feature_idx, data_size_t line_idx, double value) { uint32_t bin = bin_mappers_[sub_feature_idx]->ValueToBin(value); if (bin == bin_mappers_[sub_feature_idx]->GetMostFreqBin()) { return; } if (bin_mappers_[sub_feature_idx]->GetMostFreqBin() == 0) { bin -= 1; } if (is_multi_val_) { multi_bin_data_[sub_feature_idx]->Push(tid, line_idx, bin + 1); } else { bin += bin_offsets_[sub_feature_idx]; bin_data_->Push(tid, line_idx, bin); } } void ReSize(int num_data) { if (!is_multi_val_) { bin_data_->ReSize(num_data); } else { for (int i = 0; i < num_feature_; ++i) { multi_bin_data_[i]->ReSize(num_data); } } } inline void CopySubrow(const FeatureGroup* full_feature, const data_size_t* used_indices, data_size_t num_used_indices) { if (!is_multi_val_) { bin_data_->CopySubrow(full_feature->bin_data_.get(), used_indices, num_used_indices); } else { for (int i = 0; i < num_feature_; ++i) { multi_bin_data_[i]->CopySubrow(full_feature->multi_bin_data_[i].get(), used_indices, num_used_indices); } } } void AddFeaturesFrom(const FeatureGroup* other) { CHECK(is_multi_val_); CHECK(other->is_multi_val_); // 
every time when new features are added, we need to reconsider sparse or dense
    // Decide sparse vs. dense multi-value layout from the average sparse
    // rate across the merged feature set.
    double sum_sparse_rate = 0.0f;
    for (int i = 0; i < num_feature_; ++i) {
      sum_sparse_rate += bin_mappers_[i]->sparse_rate();
    }
    for (int i = 0; i < other->num_feature_; ++i) {
      sum_sparse_rate += other->bin_mappers_[i]->sparse_rate();
    }
    sum_sparse_rate /= (num_feature_ + other->num_feature_);
    // offset == 1 reserves bin 0 of the group; the dense multi-value layout
    // does not reserve it.
    int offset = 1;
    is_dense_multi_val_ = false;
    if (sum_sparse_rate < MultiValBin::multi_val_bin_sparse_threshold &&
        is_multi_val_) {
      // use dense multi val bin
      offset = 0;
      is_dense_multi_val_ = true;
    }
    // Rebuild the cumulative bin offsets for this group's own features.
    bin_offsets_.clear();
    num_total_bin_ = offset;
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= offset;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    // Deep-copy the other group's bin mappers and clone its per-feature bins.
    for (int i = 0; i < other->num_feature_; ++i) {
      const auto& other_bin_mapper = other->bin_mappers_[i];
      bin_mappers_.emplace_back(new BinMapper(*other_bin_mapper));
      auto num_bin = other_bin_mapper->num_bin();
      if (other_bin_mapper->GetMostFreqBin() == 0) {
        num_bin -= offset;
      }
      num_total_bin_ += num_bin;
      multi_bin_data_.emplace_back(other->multi_bin_data_[i]->Clone());
    }
    num_feature_ += other->num_feature_;
  }

  /*!
   * \brief Returns an iterator over the bins of one sub-feature.
   * \param sub_feature Index of the feature inside this group
   */
  inline BinIterator* SubFeatureIterator(int sub_feature) {
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
    } else {
      // One extra bin is reserved unless bin 0 is the most frequent bin.
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t min_bin = 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      return multi_bin_data_[sub_feature]->GetIterator(min_bin, max_bin,
                                                       most_freq_bin);
    }
  }

  /*! \brief Finalize loading of the underlying bin data. */
  inline void FinishLoad() {
    if (is_multi_val_) {
      OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
      for (int i = 0; i < num_feature_; ++i) {
        OMP_LOOP_EX_BEGIN();
        multi_bin_data_[i]->FinishLoad();
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
    } else {
      bin_data_->FinishLoad();
    }
  }

  /*!
   * \brief Iterator over the whole group's bin data;
   *        nullptr for multi-value groups, which have no single Bin.
   */
  inline BinIterator* FeatureGroupIterator() {
    if (is_multi_val_) {
      return nullptr;
    }
    uint32_t min_bin = bin_offsets_[0];
    uint32_t max_bin = bin_offsets_.back() - 1;
    uint32_t most_freq_bin = 0;
    return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
  }

  /*! \brief Size in bytes of the single-Bin storage. */
  inline size_t FeatureGroupSizesInByte() { return bin_data_->SizesInByte(); }

  /*! \brief Raw data pointer of the single-Bin storage; nullptr for multi-value groups. */
  inline void* FeatureGroupData() {
    if (is_multi_val_) {
      return nullptr;
    }
    return bin_data_->get_data();
  }

  /*!
   * \brief Partition data indices according to a split on one sub-feature.
   * \return Number of indices routed to the <= (left) side
   */
  inline data_size_t Split(int sub_feature, const uint32_t* threshold,
                           int num_threshold, bool default_left,
                           const data_size_t* data_indices, data_size_t cnt,
                           data_size_t* lte_indices,
                           data_size_t* gt_indices) const {
    uint32_t default_bin = bin_mappers_[sub_feature]->GetDefaultBin();
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        // Single-feature groups use the overload without min_bin.
        if (num_feature_ == 1) {
          return bin_data_->Split(max_bin, default_bin, most_freq_bin,
                                  missing_type, default_left, *threshold,
                                  data_indices, cnt, lte_indices, gt_indices);
        } else {
          return bin_data_->Split(min_bin, max_bin, default_bin, most_freq_bin,
                                  missing_type, default_left, *threshold,
                                  data_indices, cnt, lte_indices, gt_indices);
        }
      } else {
        if (num_feature_ == 1) {
          return bin_data_->SplitCategorical(max_bin, most_freq_bin, threshold,
                                             num_threshold, data_indices, cnt,
                                             lte_indices, gt_indices);
        } else {
          return bin_data_->SplitCategorical(
              min_bin, max_bin, most_freq_bin, threshold, num_threshold,
              data_indices, cnt, lte_indices, gt_indices);
        }
      }
    } else {
      // One extra bin is reserved unless bin 0 is the most frequent bin.
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        return multi_bin_data_[sub_feature]->Split(
            max_bin, default_bin, most_freq_bin, missing_type, default_left,
            *threshold, data_indices, cnt, lte_indices, gt_indices);
      } else {
        return multi_bin_data_[sub_feature]->SplitCategorical(
            max_bin, most_freq_bin, threshold, num_threshold, data_indices,
            cnt, lte_indices, gt_indices);
      }
    }
  }

  /*!
   * \brief From bin to feature value
   * \param bin The bin index to convert
   * \return FeatureGroup value of this bin
   */
  inline double BinToValue(int sub_feature_idx, uint32_t bin) const {
    return bin_mappers_[sub_feature_idx]->BinToValue(bin);
  }

  /*!
   * \brief Save binary data to file
   * \param writer The file writer to write to
   */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const {
    writer->AlignedWrite(&is_multi_val_, sizeof(is_multi_val_));
    writer->AlignedWrite(&is_dense_multi_val_, sizeof(is_dense_multi_val_));
    writer->AlignedWrite(&is_sparse_, sizeof(is_sparse_));
    writer->AlignedWrite(&num_feature_, sizeof(num_feature_));
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_[i]->SaveBinaryToFile(writer);
    }
    if (is_multi_val_) {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->SaveBinaryToFile(writer);
      }
    } else {
      bin_data_->SaveBinaryToFile(writer);
    }
  }

  /*!
   * \brief Get sizes in byte of this object
   */
  size_t SizesInByte() const {
    size_t ret = VirtualFileWriter::AlignedSize(sizeof(is_multi_val_)) +
                 VirtualFileWriter::AlignedSize(sizeof(is_dense_multi_val_)) +
                 VirtualFileWriter::AlignedSize(sizeof(is_sparse_)) +
                 VirtualFileWriter::AlignedSize(sizeof(num_feature_));
    for (int i = 0; i < num_feature_; ++i) {
      ret += bin_mappers_[i]->SizesInByte();
    }
    if (!is_multi_val_) {
      ret += bin_data_->SizesInByte();
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        ret += multi_bin_data_[i]->SizesInByte();
      }
    }
    return ret;
  }

  /*! \brief Disable copy */
  FeatureGroup& operator=(const FeatureGroup&) = delete;

  /*! \brief Deep copy */
  FeatureGroup(const FeatureGroup& other) {
    num_feature_ = other.num_feature_;
    is_multi_val_ = other.is_multi_val_;
    is_dense_multi_val_ = other.is_dense_multi_val_;
    is_sparse_ = other.is_sparse_;
    num_total_bin_ = other.num_total_bin_;
    bin_offsets_ = other.bin_offsets_;
    bin_mappers_.reserve(other.bin_mappers_.size());
    for (auto& bin_mapper : other.bin_mappers_) {
      bin_mappers_.emplace_back(new BinMapper(*bin_mapper));
    }
    if (!is_multi_val_) {
      bin_data_.reset(other.bin_data_->Clone());
    } else {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_.emplace_back(other.multi_bin_data_[i]->Clone());
      }
    }
  }

 private:
  /*! \brief Allocate the bin storage (sparse or dense, single or multi-value). */
  void CreateBinData(int num_data, bool is_multi_val, bool force_dense,
                     bool force_sparse) {
    if (is_multi_val) {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        // One extra bin is reserved unless bin 0 is the most frequent bin.
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(
              num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(
              Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
      }
      is_multi_val_ = true;
    } else {
      // Sparse single-Bin storage only for lone, sufficiently sparse features.
      if (force_sparse ||
          (!force_dense && num_feature_ == 1 &&
           bin_mappers_[0]->sparse_rate() >= kSparseThreshold)) {
        is_sparse_ = true;
        bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
      } else {
        is_sparse_ = false;
        bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
      }
      is_multi_val_ = false;
    }
  }

  /*! \brief Number of features */
  int num_feature_;
  /*! \brief Bin mapper for sub features */
  std::vector<std::unique_ptr<BinMapper>> bin_mappers_;
  /*! \brief Bin offsets for sub features */
  std::vector<uint32_t> bin_offsets_;
  /*! \brief Bin data of this feature */
  std::unique_ptr<Bin> bin_data_;
  /*! \brief Per-feature bin data when using the multi-value layout */
  std::vector<std::unique_ptr<Bin>> multi_bin_data_;
  /*! \brief True if this group stores its features as multi-value bins */
  bool is_multi_val_;
  /*! \brief True if the multi-value bins use the dense layout */
  bool is_dense_multi_val_;
  /*! \brief True if this feature is sparse */
  bool is_sparse_;
  /*! \brief Total number of bins in this group */
  int num_total_bin_;
};

}  // namespace LightGBM

#endif  // LIGHTGBM_FEATURE_GROUP_H_
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % Cristy % % July 2009 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/fourier.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { PixelChannel channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p l e x I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ComplexImages() performs complex mathematics on an image sequence. % % The format of the ComplexImages method is: % % MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A complex operator. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag  "Complex/Image"

  CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view;
  const char *artifact;
  const Image *Ai_image, *Ar_image, *Bi_image, *Br_image;
  double snr;
  Image *Ci_image, *complex_images, *Cr_image, *image;
  MagickBooleanType status;
  MagickOffsetType progress;
  size_t number_channels;
  ssize_t y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    A complex operation needs at least two images (real and imaginary parts).
  */
  if (images->next == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  /*
    The result is a two-image list: real (Cr) and imaginary (Ci) components.
  */
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  number_channels=MagickMin(MagickMin(MagickMin(
    Ar_image->number_channels,Ai_image->number_channels),MagickMin(
    Br_image->number_channels,Bi_image->number_channels)),MagickMin(
    Cr_image->number_channels,Ci_image->number_channels));
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(Cr_image,complex_images,Cr_image->rows,1L)
#endif
  for (y=0; y < (ssize_t) Cr_image->rows; y++)
  {
    register const Quantum *magick_restrict Ai, *magick_restrict Ar,
      *magick_restrict Bi, *magick_restrict Br;
    register Quantum *magick_restrict Ci, *magick_restrict Cr;
    register ssize_t x;

    if (status == MagickFalse)
      continue;
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Cr_image->columns,1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Cr_image->columns,1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,Cr_image->columns,1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Cr_image->columns,1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
    if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) ||
        (Br == (const Quantum *) NULL) || (Bi == (const Quantum *) NULL) ||
        (Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) Cr_image->columns; x++)
    {
      register ssize_t i;

      for (i=0; i < (ssize_t) number_channels; i++)
      {
        switch (op)
        {
          case AddComplexOperator:
          {
            Cr[i]=Ar[i]+Br[i];
            Ci[i]=Ai[i]+Bi[i];
            break;
          }
          case ConjugateComplexOperator:
          default:
          {
            Cr[i]=Ar[i];
            Ci[i]=(-Bi[i]);
            break;
          }
          case DivideComplexOperator:
          {
            double gamma;

            /* snr regularizes the denominator near zero (Wiener-style). */
            gamma=PerceptibleReciprocal((double) Br[i]*Br[i]+Bi[i]*Bi[i]+snr);
            Cr[i]=gamma*((double) Ar[i]*Br[i]+(double) Ai[i]*Bi[i]);
            Ci[i]=gamma*((double) Ai[i]*Br[i]-(double) Ar[i]*Bi[i]);
            break;
          }
          case MagnitudePhaseComplexOperator:
          {
            Cr[i]=sqrt((double) Ar[i]*Ar[i]+(double) Ai[i]*Ai[i]);
            /* Phase is scaled from [-pi,pi] to [0,1]. */
            Ci[i]=atan2((double) Ai[i],(double) Ar[i])/(2.0*MagickPI)+0.5;
            break;
          }
          case MultiplyComplexOperator:
          {
            Cr[i]=QuantumScale*((double) Ar[i]*Br[i]-(double) Ai[i]*Bi[i]);
            Ci[i]=QuantumScale*((double) Ai[i]*Br[i]+(double) Ar[i]*Bi[i]);
            break;
          }
          case RealImaginaryComplexOperator:
          {
            Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5));
            Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5));
            break;
          }
          case SubtractComplexOperator:
          {
            Cr[i]=Ar[i]-Br[i];
            Ci[i]=Ai[i]-Bi[i];
            break;
          }
        }
      }
      Ar+=GetPixelChannels(Ar_image);
      Ai+=GetPixelChannels(Ai_image);
      Br+=GetPixelChannels(Br_image);
      Bi+=GetPixelChannels(Bi_image);
      Cr+=GetPixelChannels(Cr_image);
      Ci+=GetPixelChannels(Ci_image);
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   F o r w a r d F o u r i e r T r a n s f o r m I m a g e                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ForwardFourierTransformImage() implements the discrete Fourier transform
%  (DFT) of the image either as a magnitude / phase or real / imaginary image
%  pair.
%
%  The format of the ForwardFourierTransformImage method is:
%
%      Image *ForwardFourierTransformImage(const Image *image,
%        const MagickBooleanType modulus,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulus: if true, return as transform as a magnitude / phase pair
%      otherwise a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(MAGICKCORE_FFTW_DELEGATE)

static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double *source_pixels;
  MemoryInfo *source_info;
  register ssize_t i, x;
  ssize_t u, v, y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    /* Wrap the shifted coordinate back into [0,height). */
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ?
          x+x_offset-(ssize_t) width : x+x_offset;
      source_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  (void) memcpy(roll_pixels,source_pixels,height*width*
    sizeof(*source_pixels));
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}

static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType status;
  register ssize_t x;
  ssize_t center, y;

  /*
    Swap quadrants: mirror the half-plane FFTW output into a full,
    DC-centered plane.
  */
  center=(ssize_t) (width/2L)+1L;
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /* Fill the left half by conjugate (point) symmetry of the real DFT. */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}

static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  register ssize_t x;
  ssize_t y;

  /* Negate the phase values on the mirrored left half-plane. */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      fourier_pixels[y*width+x]*=(-1.0);
}

static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView *magnitude_view, *phase_view;
  double *magnitude_pixels, *phase_pixels;
  Image *magnitude_image, *phase_image;
  MagickBooleanType status;
  MemoryInfo *magnitude_info, *phase_info;
  register Quantum *q;
  register ssize_t x;
  ssize_t i, y;

  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) memset(magnitude_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) memset(phase_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*phase_pixels));
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          /* Map phase from [-pi,pi] to [0,1] for storage as intensity. */
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case GreenPixelChannel:
        {
          SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case BluePixelChannel:
        {
          SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case BlackPixelChannel:
        {
          SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case AlphaPixelChannel:
        {
          SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
      }
      i++;
      q+=GetPixelChannels(magnitude_image);
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          SetPixelRed(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case GreenPixelChannel:
        {
          SetPixelGreen(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case BluePixelChannel:
        {
          SetPixelBlue(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case BlackPixelChannel:
        {
          SetPixelBlack(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case AlphaPixelChannel:
        {
          SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
      }
      i++;
      q+=GetPixelChannels(phase_image);
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}

static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView *image_view;
  const char *value;
  double *source_pixels;
  fftw_complex *forward_pixels;
  fftw_plan fftw_r2c_plan;
  MemoryInfo *forward_info, *source_info;
  register const Quantum *p;
  register ssize_t i, x;
  ssize_t y;

  /*
    Generate the forward Fourier transform.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  memset(source_pixels,0,fourier_info->width*fourier_info->height*
    sizeof(*source_pixels));
  /*
    Copy the selected channel into the real input array, scaled to [0,1].
  */
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(image,p);
          break;
        }
        case GreenPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(image,p);
          break;
        }
        case BluePixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(image,p);
          break;
        }
        case BlackPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlack(image,p);
          break;
        }
        case AlphaPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelAlpha(image,p);
          break;
        }
      }
      i++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* r2c output is the non-redundant half-plane: width*(height/2+1). */
  forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
  /* FFTW plan creation is not thread-safe; plan execution is. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double gamma;

      /*
        Normalize fourier transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}

static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const PixelChannel channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double *magnitude_pixels, *phase_pixels;
  FourierInfo fourier_info;
  MagickBooleanType status;
  MemoryInfo *magnitude_info, *phase_info;

  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0)
|| ((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info == (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels, phase_pixels,exception); if (status != MagickFalse) status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels, phase_pixels,exception); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } #endif MagickExport Image *ForwardFourierTransformImage(const Image *image, const MagickBooleanType modulus,ExceptionInfo *exception) { Image *fourier_image; fourier_image=NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", image->filename); #else { Image *magnitude_image; size_t height, width; width=image->columns; height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || 
        ((image->rows % 2) != 0))
      {
        /* Pad to an even square so the DFT dimensions are symmetric. */
        size_t extent=image->columns < image->rows ? image->rows :
          image->columns;
        width=(extent & 0x01) == 1 ? extent+1UL : extent;
      }
    height=width;
    magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
    if (magnitude_image != (Image *) NULL)
      {
        Image *phase_image;

        magnitude_image->storage_class=DirectClass;
        magnitude_image->depth=32UL;
        phase_image=CloneImage(image,width,height,MagickTrue,exception);
        if (phase_image == (Image *) NULL)
          magnitude_image=DestroyImage(magnitude_image);
        else
          {
            MagickBooleanType is_gray, status;

            phase_image->storage_class=DirectClass;
            phase_image->depth=32UL;
            AppendImageToList(&fourier_image,magnitude_image);
            AppendImageToList(&fourier_image,phase_image);
            status=MagickTrue;
            is_gray=IsImageGray(image);
            /* Each channel is transformed in its own parallel section. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel sections
#endif
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType thread_status;

                if (is_gray != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GrayPixelChannel,modulus,fourier_image,exception);
                else
                  thread_status=ForwardFourierTransformChannel(image,
                    RedPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GreenPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    BluePixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType thread_status;

                thread_status=MagickTrue;
                if (image->colorspace == CMYKColorspace)
                  thread_status=ForwardFourierTransformChannel(image,
                    BlackPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType thread_status;

                thread_status=MagickTrue;
                if (image->alpha_trait != UndefinedPixelTrait)
                  thread_status=ForwardFourierTransformChannel(image,
                    AlphaPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
            }
            if (status == MagickFalse)
              fourier_image=DestroyImageList(fourier_image);
            fftw_cleanup();
          }
      }
  }
#endif
  return(fourier_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I n v e r s e F o u r i e r T r a n s f o r m I m a g e                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InverseFourierTransformImage() implements the inverse discrete Fourier
%  transform (DFT) of the image either as a magnitude / phase or real /
%  imaginary image pair.
%
%  The format of the InverseFourierTransformImage method is:
%
%      Image *InverseFourierTransformImage(const Image *magnitude_image,
%        const Image *phase_image,const MagickBooleanType modulus,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o magnitude_image: the magnitude or real image.
%
%    o phase_image: the phase or imaginary image.
%
%    o modulus: if true, return transform as a magnitude / phase pair
%      otherwise a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(MAGICKCORE_FFTW_DELEGATE)

static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t x;
  ssize_t center, y;

  /*
    Swap quadrants.
*/ center=(ssize_t) (width/2L)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[(height-y)*center-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[y*center]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image, fftw_complex *fourier_pixels,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *inverse_pixels, *magnitude_pixels, *phase_pixels; MagickBooleanType status; MemoryInfo *inverse_info, *magnitude_info, *phase_info; register const Quantum *p; register ssize_t i, x; ssize_t y; /* Inverse fourier - read image and break down into a double array. */ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); inverse_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*inverse_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL) || (inverse_info == (MemoryInfo *) NULL)) { if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (inverse_info != (MemoryInfo *) NULL) inverse_info=RelinquishVirtualMemory(inverse_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info); i=0L; 
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p); break; } case GreenPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p); break; } case BluePixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p); break; } case BlackPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p); break; } case AlphaPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p); break; } } i++; p+=GetPixelChannels(magnitude_image); } } magnitude_view=DestroyCacheView(magnitude_view); status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_pixels,inverse_pixels); (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*magnitude_pixels)); i=0L; phase_view=AcquireVirtualCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p); break; } case GreenPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p); break; } case BluePixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p); break; } case BlackPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p); break; } case AlphaPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p); break; } } i++; p+=GetPixelChannels(phase_image); 
} } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]-=0.5; phase_pixels[i]*=(2.0*MagickPI); i++; } } phase_view=DestroyCacheView(phase_view); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_pixels,inverse_pixels); (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*phase_pixels)); inverse_info=RelinquishVirtualMemory(inverse_info); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I* magnitude_pixels[i]*sin(phase_pixels[i]); #else fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]); fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i]; #else fourier_pixels[i][0]=magnitude_pixels[i]; fourier_pixels[i][1]=phase_pixels[i]; #endif i++; } magnitude_info=RelinquishVirtualMemory(magnitude_info); phase_info=RelinquishVirtualMemory(phase_info); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_plan fftw_c2r_plan; MemoryInfo *source_info; register Quantum *q; register ssize_t i, x; ssize_t y; source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) 
ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); value=GetImageArtifact(image,"fourier:normalize"); if (LocaleCompare(value,"inverse") == 0) { double gamma; /* Normalize inverse transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]*=gamma; #else fourier_pixels[i][0]*=gamma; fourier_pixels[i][1]*=gamma; #endif i++; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier_pixels,source_pixels,FFTW_ESTIMATE); fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels); fftw_destroy_plan(fftw_c2r_plan); i=0L; image_view=AcquireAuthenticCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? 
image->columns : fourier_info->width,1UL,exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BluePixelChannel: { SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BlackPixelChannel: { SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case AlphaPixelChannel: { SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } } i++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const PixelChannel channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { fftw_complex *inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *inverse_info; fourier_info.width=magnitude_image->columns; fourier_info.height=magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { size_t extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; inverse_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info); status=InverseFourier(&fourier_info,magnitude_image,phase_image, inverse_pixels,exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image, exception); inverse_info=RelinquishVirtualMemory(inverse_info); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickCoreSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickTrue,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsImageGray(magnitude_image); if (is_gray != MagickFalse) 
is_gray=IsImageGray(phase_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayPixelChannel,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->alpha_trait != UndefinedPixelTrait) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) 
fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
VolumetricDilatedMaxPooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricDilatedMaxPooling.c"
#else

#include <THNN/generic/pooling_shape.h>
#include <algorithm>

/*
 * Validate kernel/stride/padding/dilation arguments and tensor shapes for
 * dilated 3D max pooling.  When gradOutput / indices are non-NULL their
 * shapes are checked against the computed output geometry as well.
 */
static inline void THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         THIndexTensor *indices,
                         int kT, int kW, int kH,
                         int dT, int dW, int dH,
                         int pT, int pW, int pH,
                         int dilationT, int dilationW, int dilationH,
                         bool ceilMode)
{
  int ndim = input->dim();
  /* Dimension indices; shifted by one in 5D (batch) mode. */
  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;

  THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
             "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
             kT, kH, kW);
  THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
             "stride should be greater than zero, but got dT: %d dH: %d dW: %d",
             dT, dH, dW);
  THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 14,
             "dilation should be greater than 0, but got dilationT: %d dilationH: %d dilationW: %d",
             dilationT, dilationH, dilationW);

  THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
                "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");

  if (input->dim() == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  THArgCheck(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH, 2,
             "pad should be smaller than half of kernel size, but got "
             "kT: %d kW: %d, kH: %d, padT: %d, padW: %d, padH: %d",
             kT, kW, kH, pT, pW, pH);

  /* input sizes */
  nslices = input->size(dimN);
  itime   = input->size(dimt);
  iheight = input->size(dimh);
  iwidth  = input->size(dimw);

  otime   = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceilMode);
  oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceilMode);
  owidth  = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceilMode);

  if (otime < 1 || owidth < 1 || oheight < 1)
    THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small",
            nslices,itime,iheight,iwidth,nslices,otime,oheight,owidth);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
  }
  if (indices != NULL) {
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimN, nslices);
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimt, otime);
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimh, oheight);
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimw, owidth);
  }
}

/*
 * Forward pass over a single (non-batch) frame: for every output cell find
 * the max over its dilated pooling window and record the flat input index
 * of the winner in indz_p (needed by the backward pass).  Slices (channels)
 * are processed in parallel.
 */
static void THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
          scalar_t *input_p,
          scalar_t *output_p,
          THIndex_t *indz_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int dilationT, int dilationW, int dilationH)
{
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    /* loop over output */
    int64_t i, j, ti;
    scalar_t *ip = input_p + k * itime * iwidth * iheight;
    for (ti = 0; ti < otime; ti++)
    {
      for (i = 0; i < oheight; i++)
      {
        for (j = 0; j < owidth; j++)
        {
          /* Window bounds in input coordinates (before clipping). */
          int64_t start_t = ti * dT - pT;
          int64_t start_h = i * dH - pH;
          int64_t start_w = j * dW - pW;

          int64_t end_t = std::min(start_t + (kT - 1) * dilationT + 1, itime);
          int64_t end_h = std::min(start_h + (kH - 1) * dilationH + 1, iheight);
          int64_t end_w = std::min(start_w + (kW - 1) * dilationW + 1, iwidth);

          /* Step past padding: advance by dilation so visited taps stay on
             the dilated grid. */
          while(start_t < 0)
            start_t += dilationT;
          while(start_h < 0)
            start_h += dilationH;
          while(start_w < 0)
            start_w += dilationW;

          scalar_t *op = output_p + k * otime * owidth * oheight
            + ti * owidth * oheight + i * owidth + j;
          THIndex_t *indzp = indz_p + k * otime * owidth * oheight
            + ti * owidth * oheight + i * owidth + j;

          /* compute local max: */
          int64_t maxindex = -1;
          scalar_t maxval = -THInf;
          int64_t x,y,z;
          int64_t index = 0;

          for (z = start_t; z < end_t; z += dilationT)
          {
            for (y = start_h; y < end_h; y += dilationH)
            {
              for (x = start_w; x < end_w; x += dilationW)
              {
                index = z * iwidth * iheight + y * iwidth + x;
                scalar_t val = ip[index];
                /* NaN wins so NaNs propagate through pooling. */
                if ((val > maxval) || std::isnan(val))
                {
                  maxval = val;
                  maxindex = index;
                }
              }
            }
          }

          /* store location of max */
          *indzp = maxindex;

          /* set output to local max */
          *op = maxval;
        }
      }
    }
  }
}

/*
 * Forward entry point.  Resizes output/indices to the computed geometry and
 * dispatches per-frame work; 5D inputs run one frame per batch element in
 * parallel.
 */
void THNN_(VolumetricDilatedMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int dilationT, int dilationW, int dilationH,
          bool ceilMode)
{
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;
  scalar_t *input_data;
  scalar_t *output_data;
  THIndex_t *indices_data;

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  if (input->dim() == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
        state, input, NULL, NULL,
        kT, kW, kH, dT, dW, dH,
        pT, pW, pH, dilationT, dilationW, dilationH, ceilMode);

  /* sizes */
  nslices = input->size(dimN);
  itime   = input->size(dimt);
  iheight = input->size(dimh);
  iwidth  = input->size(dimw);

  otime   = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceilMode);
  oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceilMode);
  owidth  = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceilMode);

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  if (input->dim() == 4) /* non-batch mode */
  {
    /* resize output */
    THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
    /* indices will contain ti,i,j uchar locations packed into float/double */
    THIndexTensor_(resize4d)(indices, nslices, otime, oheight, owidth);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();
    indices_data = THIndexTensor_(data)(indices);

    THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
      input_data, output_data,
      indices_data,
      nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH,
      dilationT, dilationW, dilationH
    );
  }
  else /* batch mode */
  {
    int64_t p;
    int64_t nBatch = input->size(0);

    int64_t istride = nslices * itime * iwidth * iheight;
    int64_t ostride = nslices * otime * owidth * oheight;

    /* resize output */
    THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);
    /* indices will contain ti,i,j locations for each output point */
    THIndexTensor_(resize5d)(indices, nBatch, nslices, otime, oheight, owidth);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();
    indices_data = THIndexTensor_(data)(indices);

#pragma omp parallel for private(p)
    for (p=0; p < nBatch; p++)
    {
      THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
        input_data   + p * istride,
        output_data  + p * ostride,
        indices_data + p * ostride,
        nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH,
        dilationT, dilationW, dilationH
      );
    }
  }

  /* cleanup */
  c10::raw::intrusive_ptr::decref(input);
}

/*
 * Backward pass over a single frame: route each output gradient back to the
 * input element whose index was recorded during the forward pass.
 */
static void THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
          scalar_t *gradInput_p,
          scalar_t *gradOutput_p,
          THIndex_t *indz_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int dilationT, int dilationW, int dilationH)
{
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    scalar_t *gradInput_p_k  = gradInput_p  + k * itime * iwidth * iheight;
    scalar_t *gradOutput_p_k = gradOutput_p + k * otime * owidth * oheight;
    THIndex_t *indz_p_k = indz_p + k * otime * owidth * oheight;

    /* calculate max points */
    int64_t ti, i, j;
    for (ti = 0; ti < otime; ti++)
    {
      for (i = 0; i < oheight; i++)
      {
        for (j = 0; j < owidth; j++)
        {
          /* retrieve position of max */
          int64_t index = ti * oheight * owidth + i * owidth + j;
          int64_t maxp = indz_p_k[index];

          if (maxp != -1) {
            /* update gradient */
            gradInput_p_k[maxp] += gradOutput_p_k[index];
          }
        }
      }
    }
  }
}

/*
 * Backward entry point.  gradInput is resized to input's shape and zeroed,
 * then gradients are scattered per frame (batched in parallel for 5D).
 *
 * FIX: the size locals were previously plain `int`, unlike the int64_t used
 * everywhere else in this file — widened to int64_t for consistency and to
 * avoid overflow on very large tensors.
 */
void THNN_(VolumetricDilatedMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int dilationT, int dilationW, int dilationH,
          bool ceilMode)
{
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;
  scalar_t *gradInput_data;
  scalar_t *gradOutput_data;
  THIndex_t *indices_data;

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
        state, input, gradOutput, indices,
        kT, kW, kH, dT, dW, dH,
        pT, pW, pH, dilationT, dilationW, dilationH, ceilMode);

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size(dimN);
  itime   = input->size(dimt);
  iheight = input->size(dimh);
  iwidth  = input->size(dimw);
  otime   = gradOutput->size(dimt);
  oheight = gradOutput->size(dimh);
  owidth  = gradOutput->size(dimw);

  /* get raw pointers */
  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->dim() == 4) /* non-batch mode*/
  {
    THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
      gradInput_data, gradOutput_data,
      indices_data,
      nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      dT, dW, dH,
      pT, pW, pH,
      dilationT, dilationW, dilationH
    );
  }
  else /* batch mode */
  {
    int64_t p;
    int64_t nBatch = input->size(0);

    int64_t istride = nslices * itime * iwidth * iheight;
    int64_t ostride = nslices * otime * owidth * oheight;

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++)
    {
      THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
        gradInput_data  + p * istride,
        gradOutput_data + p * ostride,
        indices_data    + p * ostride,
        nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        dT, dW, dH,
        pT, pW, pH,
        dilationT, dilationW, dilationH
      );
    }
  }

  /* cleanup */
  c10::raw::intrusive_ptr::decref(gradOutput);
}

#endif
gemm_x_dia_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Dense-result multiply with a DIA (diagonal) sparse matrix:
 *   y = beta * y + alpha * mat * x
 * where x and y are row-major dense blocks of `columns` columns with leading
 * dimensions ldx / ldy.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT nthreads = alpha_get_thread_num();

    /* Phase 1: scale every entry of y by beta, one row per iteration. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(nthreads)
#endif
    for (ALPHA_INT row = 0; row < mat->rows; ++row)
    {
        ALPHA_Number *y_row = &y[index2(row, 0, ldy)];
        for (ALPHA_INT col = 0; col < columns; col++)
            alpha_mul(y_row[col], y_row[col], beta);
    }

    /* Phase 2: accumulate alpha * mat * x.  Each thread owns a disjoint
       slice [col_lo, col_hi) of the dense columns, so no two threads ever
       touch the same y element and no synchronization is required. */
#ifdef _OPENMP
#pragma omp parallel num_threads(nthreads)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT col_lo = cross_block_low(tid, nthreads, columns);
        const ALPHA_INT col_hi = cross_block_high(tid, nthreads, columns);

        for (ALPHA_INT diag = 0; diag < mat->ndiag; ++diag)
        {
            /* distance < 0: sub-diagonal (starts at row -distance);
               distance > 0: super-diagonal (starts at column distance). */
            const ALPHA_INT dist = mat->distance[diag];
            const ALPHA_INT first_row = alpha_max(0, -dist);
            const ALPHA_INT first_col = alpha_max(0, dist);
            const ALPHA_INT diag_len = alpha_min(mat->rows - first_row, mat->cols - first_col);

            for (ALPHA_INT step = 0; step < diag_len; ++step)
            {
                const ALPHA_INT ar = first_row + step;
                const ALPHA_INT ac = first_col + step;
                ALPHA_Number *y_row = &y[index2(ar, 0, ldy)];
                const ALPHA_Number *x_row = &x[index2(ac, 0, ldx)];

                ALPHA_Number scaled;
                alpha_mul(scaled, mat->values[index2(diag, ar, mat->lval)], alpha);
                for (ALPHA_INT bc = col_lo; bc < col_hi; ++bc)
                    alpha_madde(y_row[bc], scaled, x_row[bc]);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
THTensorMath.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/THTensorMath.c" #else #ifdef _OPENMP #include <omp.h> #endif #define TH_OMP_OVERHEAD_THRESHOLD 100000 #ifdef _OPENMP #ifndef _WIN32 #define PRAGMA(P) _Pragma(#P) #else #define PRAGMA(P) __pragma(P) #endif #define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \ { \ ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR); \ PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \ { \ size_t num_threads = omp_get_num_threads(); \ size_t tid = omp_get_thread_num(); \ ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \ ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \ TH_TENSOR_offset + TH_TENSOR_size / num_threads; \ ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \ TYPE *TENSOR##_data = THTensor_(data)(TENSOR) + TH_TENSOR_offset; \ CODE \ } \ } #else #define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \ { \ TYPE *TENSOR##_data = THTensor_(data)(TENSOR); \ ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \ CODE \ } #endif #ifdef _OPENMP #define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \ { \ ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \ PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \ { \ size_t num_threads = omp_get_num_threads(); \ size_t tid = omp_get_thread_num(); \ ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \ ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? 
TH_TENSOR_size : \ TH_TENSOR_offset + TH_TENSOR_size / num_threads; \ ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \ TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \ TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \ CODE \ } \ } #else #define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \ { \ TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \ TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \ ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \ CODE \ } #endif #ifdef _OPENMP #define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \ { \ ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \ PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \ { \ size_t num_threads = omp_get_num_threads(); \ size_t tid = omp_get_thread_num(); \ ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \ ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \ TH_TENSOR_offset + TH_TENSOR_size / num_threads; \ ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \ TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \ TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \ TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3) + TH_TENSOR_offset; \ CODE \ } \ } #else #define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \ { \ TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \ TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \ TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3); \ ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \ CODE \ } #endif void THTensor_(fill)(THTensor *r_, real value) { if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) { TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len);); } else { TH_TENSOR_APPLY(real, r_, if (r__stride == 1) { THVector_(fill)(r__data, value, r__size); r__i = r__size; r__data += 
r__stride * r__size; break; } else { *r__data = value; } ); } } void THTensor_(zero)(THTensor *r_) { THTensor_(fill)(r_, 0); } void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value) { TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THFree(mask_counter); THFree(tensor_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = value; }); } void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src ) { THTensor *srct = THTensor_(newContiguous)(src); real *src_data = THTensor_(data)(srct); ptrdiff_t cntr = 0; ptrdiff_t nelem = THTensor_(nElement)(srct); if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask)) { THTensor_(free)(srct); THError("Number of elements of destination tensor != Number of elements in mask"); } TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THTensor_(free)(srct); THFree(mask_counter); THFree(tensor_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { if (cntr == nelem) { THTensor_(free)(srct); THFree(mask_counter); THFree(tensor_counter); THError("Number of elements of src < number of ones in mask"); } *tensor_data = *src_data; src_data++; cntr++; }); THTensor_(free)(srct); } void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask) { ptrdiff_t numel = THByteTensor_sumall(mask); real *tensor_data; #ifdef DEBUG THAssert(numel <= LONG_MAX); #endif THTensor_(resize1d)(tensor,numel); tensor_data = THTensor_(data)(tensor); TH_TENSOR_APPLY2(real, src, unsigned char, mask, if (*mask_data > 1) { THFree(mask_counter); THFree(src_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = *src_data; tensor_data++; }); } // Finds non-zero elements of a tensor and returns their subscripts void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor) { ptrdiff_t numel = 0; int64_t 
*subscript_data; int64_t i = 0; int64_t dim; int64_t div = 1; #ifdef TH_REAL_IS_HALF #define IS_NONZERO(val) ((val.x & 0x7fff) != 0) #else #define IS_NONZERO(val) ((val)!=0) #endif /* First Pass to determine size of subscripts */ TH_TENSOR_APPLY(real, tensor, if IS_NONZERO(*tensor_data) { ++numel; }); #ifdef DEBUG THAssert(numel <= LONG_MAX); #endif THLongTensor_resize2d(subscript, numel, tensor->nDimension); /* Second pass populates subscripts */ subscript_data = THLongTensor_data(subscript); TH_TENSOR_APPLY(real, tensor, if IS_NONZERO(*tensor_data) { div = 1; for (dim = tensor->nDimension - 1; dim >= 0; dim--) { *(subscript_data + dim) = (i/div) % tensor->size[dim]; div *= tensor->size[dim]; } subscript_data += tensor->nDimension; } ++i;); } void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { ptrdiff_t i, numel; THLongStorage *newSize; THTensor *tSlice, *sSlice; int64_t *index_data; real *tensor_data, *src_data; THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); THArgCheck(src->nDimension > 0,2,"Source tensor is empty"); numel = THLongTensor_nElement(index); newSize = THLongStorage_newWithSize(src->nDimension); THLongStorage_rawCopy(newSize,src->size); #ifdef DEBUG THAssert(numel <= LONG_MAX); #endif newSize->data[dim] = numel; THTensor_(resize)(tensor,newSize,NULL); THLongStorage_free(newSize); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor)) { tensor_data = THTensor_(data)(tensor); src_data = THTensor_(data)(src); ptrdiff_t rowsize = THTensor_(nElement)(src) / src->size[0]; // check that the indices are within range int64_t max = src->size[0] - 1 + TH_INDEX_BASE; for (i=0; i<numel; i++) { if (index_data[i] < TH_INDEX_BASE || index_data[i] > max) { THLongTensor_free(index); 
THError("index out of range"); } } if (src->nDimension == 1) { #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) tensor_data[i] = src_data[index_data[i] - TH_INDEX_BASE]; } else { #pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) memcpy(tensor_data + i*rowsize, src_data + (index_data[i] - TH_INDEX_BASE)*rowsize, rowsize*sizeof(real)); } } else if (src->nDimension == 1) { for (i=0; i<numel; i++) THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i] - TH_INDEX_BASE)); } else { for (i=0; i<numel; i++) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor, dim, i); THTensor_(select)(sSlice, src, dim, index_data[i] - TH_INDEX_BASE); THTensor_(copy)(tSlice, sSlice); THTensor_(free)(tSlice); THTensor_(free)(sSlice); } } THLongTensor_free(index); } void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { ptrdiff_t i, numel; THTensor *tSlice, *sSlice; int64_t *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (tensor->nDimension > 1 ) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); for (i=0; i<numel; i++) { THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE); THTensor_(select)(sSlice, src, dim, i); THTensor_(copy)(tSlice, sSlice); } THTensor_(free)(tSlice); THTensor_(free)(sSlice); } else { for (i=0; i<numel; i++) { THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i)); } } THLongTensor_free(index); } void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { 
ptrdiff_t i, numel;
  THTensor *tSlice, *sSlice;
  int64_t *index_data;

  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");

  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);

  if (tensor->nDimension > 1)
  {
    tSlice = THTensor_(new)();
    sSlice = THTensor_(new)();

    for (i=0; i<numel; i++)
    {
      THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
      THTensor_(select)(sSlice, src, dim, i);
      /* tSlice = tSlice + 1.0 * sSlice */
      THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice);
    }

    THTensor_(free)(tSlice);
    THTensor_(free)(sSlice);
  }
  else
  {
    for (i=0; i<numel; i++)
    {
      THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i] - TH_INDEX_BASE));
    }
  }
  THLongTensor_free(index);
}

/* Fill the slices of tensor named by index along dim with the constant val.
   NOTE(review): index values are not range-checked here (indexSelect does
   check) — confirm callers guarantee valid indices. */
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  ptrdiff_t i, numel;
  THTensor *tSlice;
  int64_t *index_data;

  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);

  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);

  for (i=0; i<numel; i++)
  {
    if (tensor->nDimension > 1)
    {
      tSlice = THTensor_(new)();
      THTensor_(select)(tSlice, tensor,dim,index_data[i] - TH_INDEX_BASE);
      THTensor_(fill)(tSlice, val);
      THTensor_(free)(tSlice);
    }
    else
    {
      THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, val);
    }
  }
  THLongTensor_free(index);
}

/* Gather values from src along dim using index (same rank as src);
   index values are validated inside the apply loop below. */
void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
  int64_t elems_per_row, i, idx;

  THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2,
             "Input tensor must have same dimensions as output tensor");
THArgCheck(dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4,
             "Index tensor must have same dimensions as input tensor");

  elems_per_row = THLongTensor_size(index, dim);

  TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim,
                       for (i = 0; i < elems_per_row; ++i)
                       {
                         idx = *(index_data + i*index_stride);
                         /* validate before dereferencing src; free the
                            macro's bookkeeping buffer before erroring */
                         if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE)
                         {
                           THFree(TH_TENSOR_DIM_APPLY_counter);
                           THError("Invalid index in gather");
                         }
                         *(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride];
                       })
}

/* Scatter the values of src into tensor along dim at the positions given
   by index (same rank as tensor). */
void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
  int64_t elems_per_row, i, idx;

  THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
             "Index tensor must have same dimensions as output tensor");
  THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
             "Input tensor must have same dimensions as output tensor");

  elems_per_row = THLongTensor_size(index, dim);

  TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim,
                       for (i = 0; i < elems_per_row; ++i)
                       {
                         idx = *(index_data + i*index_stride);
                         if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
                         {
                           THFree(TH_TENSOR_DIM_APPLY_counter);
                           THError("Invalid index in scatter");
                         }
                         tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride);
                       })
}

/* Scatter the constant val into tensor along dim at the positions given
   by index. */
void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  int64_t elems_per_row, i, idx;

  THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
             "Index tensor must have same dimensions as output tensor");

  elems_per_row = THLongTensor_size(index, dim);

  TH_TENSOR_DIM_APPLY2(real, tensor, int64_t, index, dim,
                       for (i = 0; i <
elems_per_row; ++i)
                       {
                         idx = *(index_data + i*index_stride);
                         if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
                         {
                           THFree(TH_TENSOR_DIM_APPLY_counter);
                           THError("Invalid index in scatter");
                         }
                         tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val;
                       })
}

/* Dot product of two tensors, both traversed as flat sequences. */
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
  accreal sum = 0;
  /* we use a trick here. careful with that. */
  /* the body advances the APPLY2 cursors itself by the longest common
     contiguous run sz, hands that run to BLAS dot, then break;s out of
     the macro's own per-element loop. */
  TH_TENSOR_APPLY2(real, tensor, real, src,
                   int64_t sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
                   sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
                   tensor_i += sz;
                   src_i += sz;
                   tensor_data += sz*tensor_stride;
                   src_data += sz*src_stride;
                   break;);
  return sum;
}

/* NaN helpers: real-valued types test with isnan(); integer types never
   contain NaN so the test is constant false and the break is a no-op. */
#undef th_isnan
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan(val) \
(isnan(val))
#else
#define th_isnan(val) (0)
#endif

#undef th_isnan_break
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan_break(val) \
if (isnan(val)) break;
#else
#define th_isnan_break(val)
#endif

/* Minimum over all elements; a NaN wins the comparison and stops the scan. */
real THTensor_(minall)(THTensor *tensor)
{
  real theMin;
  real value;

  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMin = THTensor_(data)(tensor)[0];
  TH_TENSOR_APPLY(real, tensor,
                  value = *tensor_data;
                  /* This is not the same as value<theMin in the case of NaNs */
                  if(!(value >= theMin))
                  {
                    theMin = value;
                    th_isnan_break(value)
                  });
  return theMin;
}

/* Maximum over all elements; symmetric to minall. */
real THTensor_(maxall)(THTensor *tensor)
{
  real theMax;
  real value;

  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMax = THTensor_(data)(tensor)[0];
  TH_TENSOR_APPLY(real, tensor,
                  value = *tensor_data;
                  /* This is not the same as value>theMax in the case of NaNs */
                  if(!(value <= theMax))
                  {
                    theMax = value;
                    th_isnan_break(value)
                  });
  return theMax;
}

/* Sum of all elements, accumulated in the wider accreal type. */
accreal THTensor_(sumall)(THTensor *tensor)
{
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
  return sum;
}

/* Product of all elements, accumulated in accreal. */
accreal THTensor_(prodall)(THTensor *tensor)
{
  accreal prod = 1;
TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
  return prod;
}

/* r_ = t + value (elementwise); vectorized on the contiguous fast path. */
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
  }
}

/* r_ = t - value, delegated to add with the negated scalar. */
void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(add)(r_, t, -value);
}

/* r_ = t * value (elementwise). */
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
  }
}

/* r_ = t / value (elementwise). */
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
  }
}

/* r_ = t << value: multiply by 2^value for float/double, an error for
   half, and a true bit shift for the integer instantiations. */
void THTensor_(lshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
  return THTensor_(mul)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
  return THTensor_(mul)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
  return THError("lshift is not supported for torch.HalfTensor");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t sz = THTensor_(nElement)(t);
    int64_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
      /* non-byte types shift through ureal — presumably the unsigned
         counterpart of real; confirm against THGenerate*Type.h */
#if defined(TH_REAL_IS_BYTE)
      rp[i] = ((real) tp[i]) << value;
#else
      rp[i] = ((ureal) tp[i]) << value;
#endif
    }
  } else {
#if defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) << value););
#else
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) << value););
#endif
  }
#endif
}

/* r_ = t >> value; mirror of lshift (divide by 2^value for float/double). */
void THTensor_(rshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
  return THTensor_(div)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
  return THTensor_(div)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
  return THError("rshift is not supported for torch.HalfTensor");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t sz = THTensor_(nElement)(t);
    int64_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_BYTE)
      rp[i] = ((real) tp[i]) >> value;
#else
      rp[i] = ((ureal) tp[i]) >> value;
#endif
    }
  } else {
#if defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value););
#else
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) >> value););
#endif
  }
#endif
}

/* r_ = fmod(t, value): C-style remainder, sign follows the dividend
   (contrast with remainder below). */
void THTensor_(fmod)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = fmod(tp[i], value);
#else
      rp[i] = tp[i] % value;
#endif
    }
  } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data =
fmod(*t_data, value););
#else
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data % value););
#endif
  }
}

/* r_ = t mod value with the result taking the sign of the divisor
   (floored modulo, unlike fmod above); division by zero yields NAN for
   float/double. */
void THTensor_(remainder)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = (value == 0)? NAN : tp[i] - value * floor(tp[i] / value);
#else
      // There is no NAN for integers
      rp[i] = tp[i] % value;
      if (rp[i] * value < 0)
        rp[i] += value;
#endif
    }
  } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value););
#else
    // There is no NAN for integers
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data % value;
                     if (*r__data * value < 0) *r__data += value;);
#endif
  }
}

/* r_ = t & value (bitwise and with a scalar); integer tensors only. */
void THTensor_(bitand)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("bitand is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t sz = THTensor_(nElement)(t);
    int64_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] & value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data & value;);
  }
#endif
}

/* r_ = t | value (bitwise or with a scalar); integer tensors only. */
void THTensor_(bitor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("bitor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t sz = THTensor_(nElement)(t);
    int64_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] | value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data | value;);
  }
#endif
}

/* r_ = t ^ value (bitwise xor with a scalar); integer tensors only. */
void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("bitxor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t sz = THTensor_(nElement)(t);
    int64_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] ^ value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data ^ value;);
  }
#endif
}

/* r_ = clamp(t, min_value, max_value) elementwise. */
void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    /* real t_val; */
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]);
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ?
max_value : *t_data););
  }
}

/* r_ = t + value * src; uses BLAS axpy for the in-place contiguous case. */
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
  // __debugbreak();
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    if(r_ == t) {
      /* r_ += value * src in one BLAS call */
      THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1);
    } else {
      TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len););
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
  }
}

/* r_ = t - value * src, delegated to cadd. */
void THTensor_(csub)(THTensor *r_, THTensor *t, real value,THTensor *src)
{
  THTensor_(cadd)(r_, t, -value, src);
}

/* r_ = t * src (elementwise product). */
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cmul)(r__data, t_data, src_data, r__len););
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
  }
}

/* r_ = t raised to src elementwise, via pow(). */
void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = pow(tp[i], sp[i]);
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = pow(*t_data, *src_data););
  }
}

/* r_ = t / src (elementwise division). */
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src)
&& THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cdiv)(r__data, t_data, src_data, r__len););
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
  }
}

/* r_ = t << src elementwise (times 2^src for float/double, an error for
   half, a bit shift for integer types). */
void THTensor_(clshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
  return THError("clshift is not supported for torch.HalfTensor");
#endif
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT)
      rp[i] = tp[i] * powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
      rp[i] = tp[i] * pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
      rp[i] = ((real) tp[i]) << sp[i];
#else
      rp[i] = ((ureal) tp[i]) << sp[i];
#endif
    }
  } else {
#if defined(TH_REAL_IS_FLOAT)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) << *src_data;);
#endif
  }
}

/* r_ = t >> src elementwise; mirror of clshift. */
void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
  return THError("crshift is not supported for torch.HalfTensor");
#endif
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp =
THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT)
      rp[i] = tp[i] / powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
      rp[i] = tp[i] / pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
      rp[i] = ((real) tp[i]) >> sp[i];
#else
      rp[i] = ((ureal) tp[i]) >> sp[i];
#endif
    }
  } else {
#if defined(TH_REAL_IS_FLOAT)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;);
#endif
  }
}

/* r_ = fmod(t, src) elementwise: C-style remainder, sign follows the
   dividend. */
void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = fmod(tp[i], sp[i]);
#else
      rp[i] = tp[i] % sp[i];
#endif
    }
  } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
  }
}

/* r_ = t mod src elementwise with the result sign following the divisor
   (floored modulo); division by zero yields NAN for float/double. */
void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) ==
THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
      rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]);
#else
      // There is no NAN for integers
      rp[i] = tp[i] % sp[i];
      if (rp[i] * sp[i] < 0)
        rp[i] += sp[i];
#endif
    }
  } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
    // There is no NAN for integers
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data % *src_data;
                     if (*r__data * *src_data < 0) *r__data += *src_data;);
#endif
  }
}

/* r_ = t & src (elementwise bitwise and); integer tensors only. */
void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("cbitand is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] & sp[i];
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
  }
#endif
}

/* r_ = t | src (elementwise bitwise or); integer tensors only. */
void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("cbitor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) &&
THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] | sp[i];
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
  }
#endif
}

/* r_ = t ^ src (elementwise bitwise xor); integer tensors only. */
void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  return THError("cbitxor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] ^ sp[i];
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
  }
#endif
}

/* r_ = value raised to t elementwise (scalar base, tensor exponents). */
void THTensor_(tpow)(THTensor *r_, real value, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = pow(value, tp[i]);
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = pow(value, *t_data););
  }
}

/* r_ = t + value * src1 * src2 (elementwise multiply-accumulate);
   r_ is initialized to a copy of t when they are distinct tensors. */
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value *
*src1_data * *src2_data;);
}

/* r_ = t + value * src1 / src2 (elementwise divide-accumulate). */
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}

/* r_ = beta * t + alpha * mat * vec, dispatched to BLAS gemv.
   The transpose flag is chosen from mat's strides; a contiguous copy is
   made when neither stride is 1. */
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
  if( (mat->nDimension != 2) || (vec->nDimension != 1) )
    THError("matrix and vector expected, got %dD, %dD", mat->nDimension, vec->nDimension);

  if( mat->size[1] != vec->size[0] ) {
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THDescBuff bv = THTensor_(sizeDesc)(vec);
    THError("size mismatch, %s, %s", bm.str, bv.str);
  }

  if(t->nDimension != 1)
    THError("vector expected, got t: %dD", t->nDimension);

  if(t->size[0] != mat->size[0]) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
  }

  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  if(mat->stride[0] == 1)
  {
    /* column-major layout: use gemv directly */
    THBlas_(gemv)('n', mat->size[0], mat->size[1],
                  alpha, THTensor_(data)(mat), mat->stride[1],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else if(mat->stride[1] == 1)
  {
    /* row-major layout: use the transposed gemv */
    THBlas_(gemv)('t', mat->size[1], mat->size[0],
                  alpha, THTensor_(data)(mat), mat->stride[0],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else
  {
    THTensor *cmat = THTensor_(newContiguous)(mat);

    THBlas_(gemv)('t', mat->size[1], mat->size[0],
                  alpha, THTensor_(data)(cmat), cmat->stride[0],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);

    THTensor_(free)(cmat);
  }
}

/* r_[i][j] = gain * squared Euclidean distance between row i of m1 and
   row j of m2, where each "row" is the flattened trailing dimensions. */
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
  int64_t N1 = m1->size[0];
  int64_t N2 = m2->size[0];
  int64_t dim;
  real *m1_p;
  real *m2_p;
  real *r_p;
  int64_t i;

  THTensor_(resize2d)(r_, N1, N2);

  /* local contiguous copies; freed at the end of the function */
  m1 = THTensor_(newContiguous)(m1);
  m2 = THTensor_(newContiguous)(m2);
THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
  THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);

  dim = m1->size[1];
  THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");

  m1_p = THTensor_(data)(m1);
  m2_p = THTensor_(data)(m2);
  r_p = THTensor_(data)(r_);

  #pragma omp parallel for private(i)
  for (i=0; i<N1; i++) {
    int64_t j,k;
    for (j=0; j<N2; j++) {
      real sum = 0;
      for (k=0; k<dim; k++) {
        real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
        sum += term*term;
      }
      r_p[ i*N2 + j ] = gain * sum;
    }
  }

  THTensor_(free)(m1);
  THTensor_(free)(m2);
}

/* r_ = beta * t + alpha * m1 * m2, dispatched to BLAS gemm.
   The transpose flags and leading dimensions are derived from the strides
   of r_, m1 and m2; contiguous clones are made for layouts gemm cannot
   consume directly. */
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
  char transpose_r, transpose_m1, transpose_m2;
  THTensor *r__, *m1_, *m2_;

  if( (m1->nDimension != 2) || (m2->nDimension != 2))
    THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);

  if(m1->size[1] != m2->size[0]) {
    THDescBuff bm1 = THTensor_(sizeDesc)(m1);
    THDescBuff bm2 = THTensor_(sizeDesc)(m2);
    THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
  }

  if( t->nDimension != 2 )
    THError("matrix expected, got %dD tensor for t", t->nDimension);

  if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bm1 = THTensor_(sizeDesc)(m1);
    THDescBuff bm2 = THTensor_(sizeDesc)(m2);
    THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
  }

  if(t != r_)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  /* r_ */
  if(r_->stride[0] == 1 && r_->stride[1] != 0)
  {
    transpose_r = 'n';
    r__ = r_;
  }
  else if(r_->stride[1] == 1 && r_->stride[0] != 0)
  {
    /* row-major result: compute the transposed product by swapping the
       operands, then flag the result as transposed */
    THTensor *swap = m2;
    m2 = m1;
    m1 = swap;
    transpose_r = 't';
    r__ = r_;
  }
  else
  {
    transpose_r = 'n';

    /* result layout unusable by gemm: work in a transposed clone and
       copy back at the end (see freeCopyTo below) */
    THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1);
    r__ = THTensor_(newClone)(transp_r_);
    THTensor_(free)(transp_r_);
    THTensor_(transpose)(r__, NULL, 0, 1);
  }

  /* m1 */
  if(m1->stride[(transpose_r == 'n' ?
0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
  {
    transpose_m1 = 'n';
    m1_ = m1;
  }
  else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
  {
    transpose_m1 = 't';
    m1_ = m1;
  }
  else
  {
    transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
    m1_ = THTensor_(newContiguous)(m1);
  }

  /* m2 */
  if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
  {
    transpose_m2 = 'n';
    m2_ = m2;
  }
  else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
  {
    transpose_m2 = 't';
    m2_ = m2;
  }
  else
  {
    transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
    m2_ = THTensor_(newContiguous)(m2);
  }

  /* do the operation */
  THBlas_(gemm)(transpose_m1,
                transpose_m2,
                r__->size[(transpose_r == 'n' ? 0 : 1)],
                r__->size[(transpose_r == 'n' ? 1 : 0)],
                m1_->size[(transpose_r == 'n' ? 1 : 0)],
                alpha,
                THTensor_(data)(m1_),
                (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
                THTensor_(data)(m2_),
                (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
                beta,
                THTensor_(data)(r__),
                r__->stride[(transpose_r == 'n' ?
1 : 0)]);

  /* free intermediate variables */
  if(m1_ != m1)
    THTensor_(free)(m1_);

  if(m2_ != m2)
    THTensor_(free)(m2_);

  if(r__ != r_)
    THTensor_(freeCopyTo)(r__, r_);
}

/* r_ = beta * t + alpha * vec1 (outer product) vec2, via BLAS ger.
   The beta scaling of r_ is applied up front (zeroed when beta == 0). */
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
  if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
    THError("vector and vector expected, got %dD, %dD tensors", vec1->nDimension, vec2->nDimension);

  if(t->nDimension != 2)
    THError("expected matrix, got %dD tensor for t", t->nDimension);

  if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bv1 = THTensor_(sizeDesc)(vec1);
    THDescBuff bv2 = THTensor_(sizeDesc)(vec2);
    THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str);
  }

  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  if(beta == 0) {
    THTensor_(zero)(r_);
  }
  else if(beta != 1)
    THTensor_(mul)(r_, r_, beta);

  if(r_->stride[0] == 1)
  {
    THBlas_(ger)(vec1->size[0], vec2->size[0],
                 alpha, THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(r_), r_->stride[1]);
  }
  else if(r_->stride[1] == 1)
  {
    /* row-major r_: apply the transposed update vec2 (outer) vec1 */
    THBlas_(ger)(vec2->size[0], vec1->size[0],
                 alpha, THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(r_), r_->stride[0]);
  }
  else
  {
    THTensor *cr = THTensor_(newClone)(r_);

    THBlas_(ger)(vec2->size[0], vec1->size[0],
                 alpha, THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(cr), cr->stride[0]);

    THTensor_(freeCopyTo)(cr, r_);
  }
}

/* result = beta * t + alpha * sum over batches of batch1[b] * batch2[b]
   (batched matrix products reduced into one output matrix). */
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  int64_t batch;

  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
             "equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
             "wrong matrix size, batch1: %dx%d, batch2: %dx%d",
             THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2),
             THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2));

  int64_t dim1 = THTensor_(size)(batch1, 1);
  int64_t dim2 = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size");

  if (t != result) {
    THTensor_(resizeAs)(result, t);
    THTensor_(copy)(result, t);
  }

  /* reusable views onto one batch of each input, rebound by select() */
  THTensor *matrix1 = THTensor_(new)();
  THTensor *matrix2 = THTensor_(new)();

  for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
    THTensor_(select)(matrix1, batch1, 0, batch);
    THTensor_(select)(matrix2, batch2, 0, batch);

    THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2);
    beta = 1; // accumulate output once
  }

  THTensor_(free)(matrix1);
  THTensor_(free)(matrix2);
}

/* result[b] = beta * t[b] + alpha * batch1[b] * batch2[b] for each
   batch b (no reduction across batches, unlike addbmm above). */
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  int64_t batch;

  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1));
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2));
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
             "equal number of batches expected, got %d, %d",
             THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
             "wrong matrix size, batch1: %dx%d, batch2: %dx%d",
             THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2),
             THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));

  int64_t bs = THTensor_(size)(batch1, 0);
  int64_t dim1 = THTensor_(size)(batch1, 1);
  int64_t dim2 = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size"); if (t != result) { THTensor_(resizeAs)(result, t); THTensor_(copy)(result, t); } THTensor *matrix1 = THTensor_(new)(); THTensor *matrix2 = THTensor_(new)(); THTensor *result_matrix = THTensor_(new)(); for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) { THTensor_(select)(matrix1, batch1, 0, batch); THTensor_(select)(matrix2, batch2, 0, batch); THTensor_(select)(result_matrix, result, 0, batch); THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2); } THTensor_(free)(matrix1); THTensor_(free)(matrix2); THTensor_(free)(result_matrix); } ptrdiff_t THTensor_(numel)(THTensor *t) { return THTensor_(nElement)(t); } void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); // two implementations optimized for data locality if (t->stride[dimension] == 1) { real theMax; real value; int64_t theIndex; int64_t i; TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, theMax = t_data[0]; theIndex = 0; for(i = 0; i < t_size; i++) { value = t_data[i*t_stride]; /* This is not the same as value>theMax in the case of NaNs */ if(!(value <= theMax)) { theIndex = i; theMax = value; th_isnan_break(value) } } *indices__data = theIndex; *values__data = theMax;); } else { if (THTensor_(nDimension)(t) > 1) { THTensor *t0 = THTensor_(newSelect)(t, dimension, 0); THTensor_(copy)(values_, t0); THTensor_(free)(t0); } else { THTensor_(fill)(values_, THTensor_(get1d)(t, 0)); } THLongTensor_zero(indices_); if(t->size[dimension] == 1) { 
return; } THTensor *tempValues_ = THTensor_(newWithTensor)(values_); // tempValues_.expand_as(t) tempValues_->size[dimension] = t->size[dimension]; tempValues_->stride[dimension] = 0; THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_); // tempIndices_.expand_as(t) tempIndices_->size[dimension] = t->size[dimension]; tempIndices_->stride[dimension] = 0; TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension, if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) { *tempValues__data = *t_data; *tempIndices__data = *tempIndices__dimOffset; }); THTensor_(free)(tempValues_); THLongTensor_free(tempIndices_); } } void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); // two implementations optimized for data locality if (t->stride[dimension] == 1) { real theMax; real value; int64_t theIndex; int64_t i; TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, theMax = t_data[0]; theIndex = 0; for(i = 0; i < t_size; i++) { value = t_data[i*t_stride]; /* This is not the same as value>theMax in the case of NaNs */ if(!(value >= theMax)) { theIndex = i; theMax = value; th_isnan_break(value) } } *indices__data = theIndex; *values__data = theMax;); } else { if (THTensor_(nDimension)(t) > 1) { THTensor *t0 = THTensor_(newSelect)(t, dimension, 0); THTensor_(copy)(values_, t0); THTensor_(free)(t0); } else { THTensor_(fill)(values_, THTensor_(get1d)(t, 0)); } THLongTensor_zero(indices_); if(t->size[dimension] == 1) { return; } THTensor *tempValues_ = THTensor_(newWithTensor)(values_); // tempValues_.expand_as(t) tempValues_->size[dimension] = 
t->size[dimension]; tempValues_->stride[dimension] = 0; THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_); // tempIndices_.expand_as(t) tempIndices_->size[dimension] = t->size[dimension]; tempIndices_->stride[dimension] = 0; TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension, if(!(*t_data >= *tempValues__data) && !th_isnan(*tempValues__data)) { *tempValues__data = *t_data; *tempIndices__data = *tempIndices__dimOffset; }); } } void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); // two implementations optimized for data locality if (t->stride[dimension] == 1) { TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; int64_t i; for(i = 0; i < t_size; i++) sum += t_data[i*t_stride]; *r__data = (real)sum;); } else { THTensor_(zero)(r_); THTensor *temp_ = THTensor_(newWithTensor)(r_); // r_.expand_as(t) temp_->size[dimension] = t->size[dimension]; temp_->stride[dimension] = 0; TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;); THTensor_(free)(temp_); } } void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); // two implementations optimized for data locality if (t->stride[dimension] == 1) { TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal prod = 1; int64_t i; for(i = 0; i < t_size; i++) prod *= t_data[i*t_stride]; *r__data = (real)prod;); } else { THTensor_(fill)(r_, 1); THTensor *temp_ = THTensor_(newWithTensor)(r_); 
    // r_.expand_as(t)
    temp_->size[dimension] = t->size[dimension];
    temp_->stride[dimension] = 0;

    TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;);
    THTensor_(free)(temp_);
  }
}

/* cumulative sum along `dimension`; r_ keeps the full shape of t */
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, t);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumsum = 0;
                       int64_t i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumsum += t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumsum;
                       });
}

/* cumulative product along `dimension`; r_ keeps the full shape of t */
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, t);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumprod = 1;
                       int64_t i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumprod *= t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumprod;
                       });
}

/* elementwise sign (-1/0/1); byte tensors are unsigned so only 0/1 is emitted */
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);

#if defined (TH_REAL_IS_BYTE)
  TH_TENSOR_APPLY2(real, r_, real, t,
    if (*t_data > 0) *r__data = 1;
    else *r__data = 0;);
#else
  TH_TENSOR_APPLY2(real, r_, real, t,
    if (*t_data > 0) *r__data = 1;
    else if (*t_data < 0) *r__data = -1;
    else *r__data = 0;);
#endif
}

/* sum of the main diagonal of a 2D tensor */
accreal THTensor_(trace)(THTensor *t)
{
  real *t_data = THTensor_(data)(t);
  accreal sum = 0;
  int64_t i = 0;
  int64_t t_stride_0, t_stride_1, t_diag_size;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));
  while(i < t_diag_size)
  {
    sum += t_data[i*(t_stride_0+t_stride_1)];
    i++;
  }

  return sum;
}

/* 3D cross product of a and b along `dimension` (must have size 3);
   dimension < 0 picks the first dimension of size 3 */
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
  int i;

  if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
    THError("inconsistent tensor dimension %dD, %dD",
        THTensor_(nDimension)(a), THTensor_(nDimension)(b));

  for(i = 0; i < THTensor_(nDimension)(a); i++)
  {
    if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THDescBuff bb = THTensor_(sizeDesc)(b);
      THError("inconsistent tensor sizes %s, %s", ba.str, bb.str);
    }
  }

  if(dimension < 0)
  {
    for(i = 0; i < THTensor_(nDimension)(a); i++)
    {
      if(THTensor_(size)(a, i) == 3)
      {
        dimension = i;
        break;
      }
    }
    if(dimension < 0) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THError("no dimension of size 3 in a: %s", ba.str);
    }
  }

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range",
      dimension + TH_INDEX_BASE);
  THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, a);

  TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
                       r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
                       r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
                       r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}

/* elementwise maximum of t and src */
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data > *src_data ? *t_data : *src_data;);
}

/* elementwise minimum of t and src */
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data < *src_data ? *t_data : *src_data;);
}

/* elementwise maximum of t and a scalar */
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data > value ? *t_data : value;);
}

/* elementwise minimum of t and a scalar */
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data < value ?
                   *t_data : value;);
}

/* resize to `size` and fill with zeros */
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(zero)(r_);
}

/* resize to `size` and fill with ones */
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(fill)(r_, 1);
}

/* 1D input: build a square matrix with t on the k-th diagonal.
   2D input: extract the k-th diagonal into a vector. */
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
  THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");

  if(THTensor_(nDimension)(t) == 1)
  {
    real *t_data = THTensor_(data)(t);
    int64_t t_stride_0 = THTensor_(stride)(t, 0);
    int64_t t_size = THTensor_(size)(t, 0);
    int64_t sz = t_size + (k >= 0 ? k : -k);
    real *r__data;
    int64_t r__stride_0;
    int64_t r__stride_1;
    int64_t i;

    THTensor_(resize2d)(r_, sz, sz);
    THTensor_(zero)(r_);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    r__stride_1 = THTensor_(stride)(r_, 1);
    /* offset to the first element of the k-th diagonal */
    r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);

    for(i = 0; i < t_size; i++)
      r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
  }
  else
  {
    real *t_data = THTensor_(data)(t);
    int64_t t_stride_0 = THTensor_(stride)(t, 0);
    int64_t t_stride_1 = THTensor_(stride)(t, 1);
    int64_t sz;
    real *r__data;
    int64_t r__stride_0;
    int64_t i;

    /* length of the k-th diagonal */
    if(k >= 0)
      sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
    else
      sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
    THTensor_(resize1d)(r_, sz);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);

    for(i = 0; i < sz; i++)
      r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
  }
}

/* n x m identity matrix; m <= 0 means square n x n */
void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m)
{
  real *r__data;
  int64_t i, sz;

  THArgCheck(n > 0, 1, "invalid argument");

  if(m <= 0)
    m = n;

  THTensor_(resize2d)(r_, n, m);
  THTensor_(zero)(r_);

  i = 0;
  r__data = THTensor_(data)(r_);
  sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
  for(i = 0; i < sz; i++)
    r__data[i*(r_->stride[0]+r_->stride[1])] = 1;
}

/* arithmetic progression from xmin towards xmax (inclusive) with given step;
   step may be negative but must be non-zero */
void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step)
{
  ptrdiff_t size;
  real i = 0;

  THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number");
  THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
              , 2, "upper bound and larger bound incoherent with step sign");

  size = (ptrdiff_t) (((xmax - xmin) / step) + 1);

  if (THTensor_(nElement)(r_) != size) {
    THTensor_(resize1d)(r_, size);
  }

  TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}

/* uniform random permutation of 0..n-1 (Fisher-Yates shuffle) */
void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n)
{
  real *r__data;
  int64_t r__stride_0;
  int64_t i;

  THArgCheck(n > 0, 1, "must be strictly positive");

  THTensor_(resize1d)(r_, n);
  r__data = THTensor_(data)(r_);
  r__stride_0 = THTensor_(stride)(r_,0);

  for(i = 0; i < n; i++)
    r__data[i*r__stride_0] = (real)(i);

  for(i = 0; i < n-1; i++)
  {
    int64_t z = THRandom_random(_generator) % (n-i);
    real sav = r__data[i*r__stride_0];
    r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0];
    r__data[(z+i)*r__stride_0] = sav;
  }
}

/* copy t into r_ resized to `size` (reshape-by-copy) */
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(copy)(r_, t);
}

/* I cut and pasted (slightly adapted) the quicksort code from
   Sedgewick's 1978 "Implementing Quicksort Programs" article
   http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf

   It is the state of the art existing implementation.
   The macros are here to make as close a match as possible to the pseudocode of
   Program 2 p.851

   Note that other partition schemes exist, and are typically presented in textbook,
   but those are less efficient. See e.g. http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto

   Julien, November 12th 2013
*/
#define MAX_LEVELS 300
#define M_SMALL 10 /* Limit for small subfiles */

/* strided element access into the value/index arrays */
#define ARR(III) arr[(III)*stride]
#define IDX(III) idx[(III)*stride]

#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap
#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap
#define BOTH_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ)); \
  LONG_SWAP(IDX(III), IDX(JJJ))

/* Ascending in-place quicksort of the (arr, idx) pair arrays: explicit stack,
   median-of-three pivot, subfiles <= M_SMALL left for one final insertion-sort
   pass. idx is permuted in lockstep so it tracks original positions. */
static void THTensor_(quicksortascend)(real *arr, int64_t *idx, int64_t elements, int64_t stride)
{
  int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;

  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    i = L+1; j = R; piv = ARR(L); pid = IDX(L);

    do {
      do { i = i+1; } while(ARR(i) < piv);
      do { j = j-1; } while(ARR(j) > piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */

  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) > ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) < piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}

/* Descending variant of the quicksort above; identical structure with every
   comparison flipped. */
static void THTensor_(quicksortdescend)(real *arr, int64_t *idx, int64_t elements, int64_t stride)
{
  int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;

  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); }

    i = L+1; j = R; piv = ARR(L); pid = IDX(L);

    do {
      do { i = i+1; } while(ARR(i) > piv);
      do { j = j-1; } while(ARR(j) < piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */

  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) < ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) > piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}

#undef MAX_LEVELS
#undef M_SMALL

/* sort along `dimension`: rt_ gets the sorted values and ri_ the original
   indices of each element; order selected by descendingOrder */
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(rt_, t);
  THTensor_(copy)(rt_, t);

  {
    THLongStorage *size = THTensor_(newSizeOf)(t);
    THLongTensor_resize(ri_, size, NULL);
    THLongStorage_free(size);
  }

  if(descendingOrder)
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension,
                         int64_t i;
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
  }
  else
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension,
                         int64_t i;
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
  }
}

/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm.
*/
/* Partially reorder (arr, idx) so position k holds the k-th smallest value,
   with elements before it <= it and elements after it >= it. */
static void THTensor_(quickselect)(real *arr, int64_t *idx, int64_t k, int64_t elements, int64_t stride)
{
  int64_t P, L, R, i, j, swap, pid;
  real rswap, piv;
  L = 0;
  R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        BOTH_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    i = L+1;
    j = R;
    piv = ARR(L);
    pid = IDX(L);
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Re-set active partition */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}

#undef ARR
#undef IDX
#undef LONG_SWAP
#undef REAL_SWAP
#undef BOTH_SWAP

/* mode along `dimension`: the most frequent value in each slice, and one
   original index of it; ties resolved by the sorted scan below */
void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  int64_t *tempi__data;
  int64_t t_size_dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  t_size_dim = THTensor_(size)(t, dimension);

  /* scratch copies of one slice, reused across all slices */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);

  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);

  TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                       int64_t i;
                       real mode = 0;
                       int64_t modei = 0;
                       int64_t temp_freq = 0;
                       int64_t max_freq = 0;
                       for(i = 0; i < t_size_dim; i++)
                          temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                          tempi__data[i] = i;
                       /* sort the slice, then scan runs of equal values */
                       THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1);

                       for(i = 0; i < t_size_dim; i++)
                       {
                          temp_freq++;
                          if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1]))
                          {
                              if (temp_freq > max_freq)
                              {
                                 mode = temp__data[i];
                                 modei = tempi__data[i];
                                 max_freq = temp_freq;
                              }
                              temp_freq = 0;
                          }
                       }
                       *values__data = mode;
                       *indices__data = modei;);

  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
}

/* k-th smallest value along `dimension` (k is 1-based) plus its original index */
void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, int64_t k, int dimension)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  int64_t *tempi__data;
  int64_t t_size_dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
  THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of range");

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  t_size_dim = THTensor_(size)(t, dimension);

  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);

  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);

  TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                       int64_t i;
                       for(i = 0; i < t_size_dim; i++)
                          temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                          tempi__data[i] = i;
                       /* quickselect uses a 0-based rank */
                       THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1);
                       *values__data = temp__data[k-1];
                       *indices__data = tempi__data[k-1];);

  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
}

/* median along `dimension` (lower middle element for even sizes), via kthvalue */
void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
  int64_t t_size_dim, k;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");

  t_size_dim = THTensor_(size)(t, dimension);
  k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */

  THTensor_(kthvalue)(values_, indices_, t, k+1, dimension);
}

/* top-k along `dim`: dir nonzero selects the k largest (descending when
   sorted), dir zero the k smallest (ascending when sorted) */
void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t,
                     int64_t k, int dim, int dir, int sorted)
{
  int numDims = THTensor_(nDimension)(t);
  THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range");

  int64_t sliceSize = THTensor_(size)(t, dim);
  THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension");

  /* per-slice scratch copies, reused across slices */
  THTensor *tmpResults = THTensor_(new)();
  THTensor_(resize1d)(tmpResults, sliceSize);
  real *tmp__data = THTensor_(data)(tmpResults);

  THLongTensor *tmpIndices = THLongTensor_new();
  THLongTensor_resize1d(tmpIndices, sliceSize);
  int64_t *tmpi__data = THLongTensor_data(tmpIndices);

  THLongStorage *topKSize = THTensor_(newSizeOf)(t);
  THLongStorage_set(topKSize, dim, k);
  THTensor_(resize)(rt_, topKSize, NULL);
  THLongTensor_resize(ri_, topKSize, NULL);
  THLongStorage_free(topKSize);

  if (dir) {
    /* k largest elements, descending order (optional: see sorted) */
    int64_t K = sliceSize - k;
    TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim,
                         int64_t i;
                         for(i = 0; i < sliceSize; i++)
                         {
                           tmp__data[i] = t_data[i*t_stride];
                           tmpi__data[i] = i;
                         }
                         /* partition so the k largest occupy positions K..sliceSize-1 */
                         if (K > 0)
                           THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1);
                         if (sorted)
                           THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1);
                         for(i = 0; i < k; i++)
                         {
                           rt__data[i*rt__stride] = tmp__data[i + K];
                           ri__data[i*ri__stride] = tmpi__data[i + K];
                         })
  }
  else {
    /* k smallest elements, ascending order (optional: see sorted) */
    TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim,
                         int64_t i;
                         for(i = 0; i < sliceSize; i++)
                         {
                           tmp__data[i] = t_data[i*t_stride];
                           tmpi__data[i] = i;
                         }
                         /* quickselect leaves element k-1 in its final position,
                            so sorting the first k-1 entries orders all k results */
                         THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1);
                         if (sorted)
                           THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1);
                         for(i = 0; i < k; i++)
                         {
                           rt__data[i*rt__stride] = tmp__data[i];
                           ri__data[i*ri__stride] = tmpi__data[i];
                         })
  }

  THTensor_(free)(tmpResults);
  THLongTensor_free(tmpIndices);
}

/* lower-triangular part of a 2D tensor: entries above the k-th diagonal are
   zeroed, the rest copied from t */
void THTensor_(tril)(THTensor *r_, THTensor *t, int64_t k)
{
  int64_t t_size_0, t_size_1;
  int64_t t_stride_0, t_stride_1;
  int64_t r__stride_0, r__stride_1;
  real *t_data,
       *r__data;
  int64_t r, c;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);

  t_size_0 = THTensor_(size)(t, 0);
  t_size_1 = THTensor_(size)(t, 1);
  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  r__stride_0 = THTensor_(stride)(r_, 0);
  r__stride_1 = THTensor_(stride)(r_, 1);
  r__data = THTensor_(data)(r_);
  t_data = THTensor_(data)(t);

  for(r = 0; r < t_size_0; r++)
  {
    int64_t sz = THMin(r+k+1, t_size_1);
    for(c = THMax(0, r+k+1); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
  }
}

/* upper-triangular part of a 2D tensor: entries below the k-th diagonal are
   zeroed, the rest copied from t */
void THTensor_(triu)(THTensor *r_, THTensor *t, int64_t k)
{
  int64_t t_size_0, t_size_1;
  int64_t t_stride_0, t_stride_1;
  int64_t r__stride_0, r__stride_1;
  real *t_data, *r__data;
  int64_t r, c;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);

  t_size_0 = THTensor_(size)(t, 0);
  t_size_1 = THTensor_(size)(t, 1);
  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  r__stride_0 = THTensor_(stride)(r_, 0);
  r__stride_1 = THTensor_(stride)(r_, 1);
  r__data = THTensor_(data)(r_);
  t_data = THTensor_(data)(t);

  for(r = 0; r < t_size_0; r++)
  {
    int64_t sz = THMin(r+k, t_size_1);
    for(c = THMax(0, r+k); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
  }
}

/* concatenate two tensors along `dimension` (delegates to catArray) */
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
  THTensor* inputs[2];
  inputs[0] = ta;
  inputs[1] = tb;
  THTensor_(catArray)(r_, inputs, 2, dimension);
}

/* concatenate numInputs tensors along `dimension`; empty (0-dim) inputs are
   allowed and skipped */
void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension)
{
  THLongStorage *size;
  int i, j;
  int64_t offset;
  int maxDim = dimension + 1;
  int allEmpty = 1;
  int allContiguous = 1;

  // cat_dimension is the actual dimension we cat along
  int cat_dimension = dimension;

  for (i = 0; i < numInputs; i++)
  {
    maxDim = THMax(maxDim, inputs[i]->nDimension);
  }

  // When the user input dimension is -1 (i.e. -2 in C)
  // Then we pick the maximum last dimension across all tensors.
  if ( dimension + TH_INDEX_BASE == -1 )
  {
    cat_dimension = maxDim?(maxDim-1):0;
  }

  THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
  THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);

  size = THLongStorage_newWithSize(maxDim);

  for(i = 0; i < maxDim; i++)
  {
    // dimSize is either the size of the dim if it exists, either 1 if #dim > 0, otherwise 0
    int64_t dimSize = i < inputs[0]->nDimension ? inputs[0]->size[i] : THMin(inputs[0]->nDimension, 1);
    if (i == cat_dimension)
    {
      for (j = 1; j < numInputs; j++)
      {
        // accumulate the size over the dimension we want to cat on.
        // Empty tensors are allowed
        dimSize += i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1);
      }
    }
    else
    {
      for (j = 1; j < numInputs; j++)
      {
        int64_t sz = (i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1));
        // If it's a dimension we're not catting on
        // Then fail if sizes are different AND > 0
        if (dimSize != sz && dimSize && sz)
        {
          THLongStorage_free(size);
          THError("inconsistent tensor sizes");
        }
        else if(!dimSize)
        {
          dimSize = sz;
        }
      }
    }
    allEmpty = allEmpty && !dimSize;
    size->data[i] = dimSize;
  }

  // Initiate catting and resizing
  // If at least one of the input is not empty
  if (!allEmpty)
  {
    THTensor_(resize)(result, size, NULL);

    // Check contiguity of all inputs and result
    for (i = 0; i < numInputs; i++) {
      if(inputs[i]->nDimension) {
        allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]);
      }
    }
    allContiguous = allContiguous && THTensor_(isContiguous)(result);

    // First path is for contiguous inputs along dim 0 (one flat memcpy per
    // input; comment text was mangled by a long->int64_t rename).
    // Second path for non-contiguous
    if (cat_dimension == 0 && allContiguous)
    {
      real* result_data = result->storage->data + result->storageOffset;
      offset = 0;
      for (j = 0; j < numInputs; j++)
      {
        if (inputs[j]->nDimension)
        {
          THTensor* input0 = inputs[j];
          real* input0_data = input0->storage->data + input0->storageOffset;
          int64_t input0_size = THTensor_(nElement)(input0);
          memcpy(result_data + offset, input0_data, input0_size*sizeof(real));
          offset += input0_size;
        }
      }
    }
    else
    {
      /* generic path: narrow a view of result and copy each input into it */
      offset = 0;
      for (j = 0; j < numInputs; j++)
      {
        if (inputs[j]->nDimension)
        {
          int64_t dimSize = cat_dimension < inputs[j]->nDimension ? inputs[j]->size[cat_dimension] : 1;
          THTensor *nt = THTensor_(newWithTensor)(result);
          THTensor_(narrow)(nt, NULL, cat_dimension, offset, dimSize);
          THTensor_(copy)(nt, inputs[j]);
          THTensor_(free)(nt);
          offset += dimSize;
        }
      }
    }
  }
  THLongStorage_free(size);
}

/* exact elementwise equality: 1 iff sizes match and all elements are equal */
int THTensor_(equal)(THTensor *ta, THTensor* tb)
{
  int equal = 1;
  if(!THTensor_(isSameSizeAs)(ta, tb))
    return 0;

  if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) {
    real *tap = THTensor_(data)(ta);
    real *tbp = THTensor_(data)(tb);
    ptrdiff_t sz = THTensor_(nElement)(ta);
    ptrdiff_t i;
    for (i=0; i<sz; ++i){
      if(tap[i] != tbp[i]) return 0;
    }
  } else {
    // Short-circuit the apply function on inequality
    TH_TENSOR_APPLY2(real, ta, real, tb,
                     if (equal && *ta_data != *tb_data) {
                       equal = 0;
                       TH_TENSOR_APPLY_hasFinished = 1; break;
                     })
  }
  return equal;
}

/* Generates the comparison families (lt/gt/le/ge/eq/ne): *Value / *ValueT
   compare against a scalar, *Tensor / *TensorT elementwise against another
   tensor; output is 0/1 in a byte tensor (plain) or same-type tensor (T). */
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
  void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
  { \
    THByteTensor_resizeNd(r_, t->nDimension, t->size, NULL); \
    TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
                     *r__data = (*t_data OP value) ? 1 : 0;); \
  } \
  void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
  { \
    THTensor_(resizeNd)(r_, t->nDimension, t->size, NULL); \
    TH_TENSOR_APPLY2(real, r_, real, t, \
                     *r__data = (*t_data OP value) ? 1 : 0;); \
  } \
  void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
  { \
    THByteTensor_resizeNd(r_, ta->nDimension, ta->size, NULL); \
    TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
                     *r__data = (*ta_data OP *tb_data) ? \
                     1 : 0;); \
  } \
  void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
  { \
    THTensor_(resizeNd)(r_, ta->nDimension, ta->size, NULL); \
    TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
                     *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
  } \

TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)

/* elementwise unary op: r_ = CFUNC(t) */
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
  void THTensor_(NAME)(THTensor *r_, THTensor *t) \
  { \
    THTensor_(resizeAs)(r_, t); \
    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
  } \

/* elementwise binary op with scalar: r_ = CFUNC(t, value) */
#define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \
  void THTensor_(NAME)(THTensor *r_, THTensor *t, real value) \
  { \
    THTensor_(resizeAs)(r_, t); \
    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \
  } \

#if defined(TH_REAL_IS_LONG)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs)
#endif /* int64_t only part */

#if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs)
#endif /* int only part */

#if defined(TH_REAL_IS_BYTE)

/* all/any reduction over a byte tensor */
#define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \
  int THTensor_(NAME)(THTensor *tensor) \
  { \
    THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \
    int sum = INIT_VALUE; \
    TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \
    return sum; \
  }

TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1)
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0)

#endif /* Byte only part */

/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)

LAB_IMPLEMENT_BASIC_FUNCTION(log,log)
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,log1p)
LAB_IMPLEMENT_BASIC_FUNCTION(sigmoid,TH_sigmoid)
LAB_IMPLEMENT_BASIC_FUNCTION(exp,exp)
LAB_IMPLEMENT_BASIC_FUNCTION(cos,cos)
LAB_IMPLEMENT_BASIC_FUNCTION(acos,acos)
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,cosh)
LAB_IMPLEMENT_BASIC_FUNCTION(sin,sin)
LAB_IMPLEMENT_BASIC_FUNCTION(asin,asin)
/* Elementwise unary math ops (float/double builds only).
 * Each LAB_IMPLEMENT_BASIC_FUNCTION(name, cfunc) expands to
 * THTensor_(name)(r_, t) that resizes r_ like t and stores cfunc(t[i]). */
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,sinh)
LAB_IMPLEMENT_BASIC_FUNCTION(tan,tan)
LAB_IMPLEMENT_BASIC_FUNCTION(atan,atan)
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,tanh)
LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,pow)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,sqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_rsqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,ceil)
LAB_IMPLEMENT_BASIC_FUNCTION(floor,floor)
LAB_IMPLEMENT_BASIC_FUNCTION(round,round)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,fabs)
LAB_IMPLEMENT_BASIC_FUNCTION(trunc,trunc)
LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_frac)
/* neg and cinv abuse the macro: "CFUNC(x)" expands to "-(x)" and "1.0 / (x)". */
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
LAB_IMPLEMENT_BASIC_FUNCTION(cinv, 1.0 / )

/* Elementwise atan2: r_[i] = atan2(tx[i], ty[i]); r_ resized like tx. */
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
  THTensor_(resizeAs)(r_, tx);
  TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = atan2(*tx_data,*ty_data););
}

/* Linear interpolation: r_[i] = a[i] + weight * (b[i] - a[i]).
 * Requires a and b to hold the same number of elements. */
void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight)
{
  THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match");
  THTensor_(resizeAs)(r_, a);
  TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_lerp(*a_data, *b_data, weight););
}

/* Mean along `dimension`: implemented as sum along the dimension divided
 * by the dimension's extent. r_ keeps that dimension with size 1. */
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);
  THTensor_(sum)(r_, t, dimension);
  THTensor_(div)(r_, r_, t->size[dimension]);
}

/* Standard deviation along `dimension`.
 * flag != 0 -> biased estimator (divide by n); flag == 0 -> unbiased (n-1).
 * Uses the one-pass sum / sum-of-squares formula and clamps the variance
 * at 0 before sqrt to absorb negative round-off. */
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       accreal sum2 = 0;
                       int64_t i;
                       for(i = 0; i < t_size; i++)
                       {
                         real z = t_data[i*t_stride];
                         sum += z;
                         sum2 += z*z;
                       }

                       if(flag)
                       {
                         sum /= t_size;
                         sum2 /= t_size;
                         sum2 -= sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)sqrt(sum2);
                       }
                       else
                       {
                         sum /= t_size;
                         sum2 /= t_size-1;
                         sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)sqrt(sum2);
                       });
}

/* Variance along `dimension`; same estimator convention and clamping as
 * THTensor_(std) above, without the final sqrt. */
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       accreal sum2 = 0;
                       int64_t i;
                       for(i = 0; i < t_size; i++)
                       {
                         real z = t_data[i*t_stride];
                         sum += z;
                         sum2 += z*z;
                       }

                       if(flag)
                       {
                         sum /= t_size;
                         sum2 /= t_size;
                         sum2 -= sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = sum2;
                       }
                       else
                       {
                         sum /= t_size;
                         sum2 /= t_size-1;
                         sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)sum2;
                       });
}

/* p-norm along `dimension`. value == 0 is treated as the "0-norm"
 * (count of nonzero entries); otherwise (sum |x|^p)^(1/p). */
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  if(value == 0) {
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         int64_t i;
                         for(i = 0; i < t_size; i++)
                           sum += t_data[i*t_stride] != 0.0;
                         *r__data = sum;)
  } else {
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         int64_t i;
                         for(i = 0; i < t_size; i++)
                           sum += pow(fabs(t_data[i*t_stride]), value);
                         *r__data = pow(sum, 1.0/value);)
  }
}

/* p-norm over all elements. p == 1 and p == 2 are special-cased to avoid
 * the generic pow() path; p == 0 counts nonzero entries. */
accreal THTensor_(normall)(THTensor *tensor, real value)
{
  accreal sum = 0;
  if(value == 0) {
    TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
    return sum;
  } else if(value == 1) {
    TH_TENSOR_APPLY(real, tensor, sum += fabs(*tensor_data););
    return sum;
  } else if(value == 2) {
    TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
    return sqrt(sum);
  } else {
    TH_TENSOR_APPLY(real, tensor, sum += pow(fabs(*tensor_data), value););
    return pow(sum, 1.0/value);
  }
}

/* Renormalize each slice along `dimension` so its p-norm does not exceed
 * maxnorm; slices already within the bound are copied unchanged.
 * The 1e-7 in the divisor guards against division by a tiny norm. */
void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm)
{
  int i;
  THTensor *rowR, *rowS;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);
  THArgCheck(value > 0, 2, "non-positive-norm not supported");
  THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions",
      THTensor_(nDimension)(src));

  rowR = THTensor_(new)();
  rowS = THTensor_(new)();

  THTensor_(resizeAs)(res, src);

  for (i=0; i<src->size[dimension]; i++)
  {
    real norm = 0;
    real new_norm;

    THTensor_(select)(rowS, src, dimension, i);
    THTensor_(select)(rowR, res, dimension, i);
    if (value == 1) {
      TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data););
    } else if (value == 2) {
      TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
    } else {
      TH_TENSOR_APPLY(real, rowS, norm += pow(fabs(*rowS_data), value););
    }

    norm = pow(norm, 1/value);

    if (norm > maxnorm)
    {
      new_norm = maxnorm / (norm + 1e-7);

      TH_TENSOR_APPLY2(
        real, rowR, real, rowS,
        *rowR_data = (*rowS_data) * new_norm;
      )
    }
    else
      THTensor_(copy)(rowR, rowS);
  }

  THTensor_(free)(rowR);
  THTensor_(free)(rowS);
}

/* p-distance between two tensors: (sum |a - b|^p)^(1/p).
 * NOTE(review): the accumulator is `real`, not `accreal` as in the other
 * reductions here — for float builds this loses precision on large inputs. */
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
  real sum = 0;
  TH_TENSOR_APPLY2(real, tensor, real, src,
                   sum += pow(fabs(*tensor_data - *src_data), value);)
  return pow(sum, 1.0/value);
}

/* Mean over all elements; rejects empty tensors. */
accreal THTensor_(meanall)(THTensor *tensor)
{
  THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
  return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor);
}

/* Unbiased variance over all elements (two-pass: mean, then squared
 * deviations; divides by n-1). */
accreal THTensor_(varall)(THTensor *tensor)
{
  accreal mean = THTensor_(meanall)(tensor);
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
  sum /= (THTensor_(nElement)(tensor)-1);
  return sum;
}

/* Standard deviation over all elements: sqrt of the unbiased variance. */
accreal THTensor_(stdall)(THTensor *tensor)
{
  return sqrt(THTensor_(varall)(tensor));
}

/* Fill r_ with n evenly spaced points in [a, b]; n == 1 requires a == b. */
void THTensor_(linspace)(THTensor *r_, real a, real b, int64_t n)
{
  real i = 0;

  THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");

  if (THTensor_(nElement)(r_) != n) {
    THTensor_(resize1d)(r_, n);
  }

  if(n == 1) {
     TH_TENSOR_APPLY(real, r_,
             *r__data = a;
             i++;
           );
  } else {
     TH_TENSOR_APPLY(real, r_,
             *r__data = a + i*(b-a)/((real)(n-1));
             i++;
           );
  }
}

/* Fill r_ with n points logarithmically spaced between 10^a and 10^b. */
void THTensor_(logspace)(THTensor *r_, real a, real b, int64_t n)
{
  real i = 0;

  THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");

  if (THTensor_(nElement)(r_) != n) {
    THTensor_(resize1d)(r_, n);
  }

  if(n == 1) {
    TH_TENSOR_APPLY(real, r_,
        *r__data = pow(10.0, a);
        i++;
        );
  } else {
    TH_TENSOR_APPLY(real, r_,
        *r__data = pow(10.0, a + i*(b-a)/((real)(n-1)));
        i++;
        );
  }
}

/* Resize to `size` and fill with uniform samples in [0, 1). */
void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(uniform)(r_, _generator, 0, 1);
}

/* Resize to `size` and fill with standard-normal samples (mean 0, std 1). */
void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(normal)(r_, _generator, 0, 1);
}

/* Histogram with `nbins` equal-width bins over [minvalue, maxvalue].
 * If minvalue == maxvalue the range is taken from the data; if the data is
 * constant the range is widened by +/-1 so the single value lands in a bin.
 * Values equal to maxval are clamped into the last bin via THMin. */
void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue)
{
  real minval;
  real maxval;
  real *h_data;

  THTensor_(resize1d)(hist, nbins);
  THTensor_(zero)(hist);
  minval = minvalue;
  maxval = maxvalue;
  if (minval == maxval)
  {
    minval = THTensor_(minall)(tensor);
    maxval = THTensor_(maxall)(tensor);
  }
  if (minval == maxval)
  {
    minval = minval - 1;
    maxval = maxval + 1;
  }

  h_data = THTensor_(data)(hist);

  TH_TENSOR_APPLY(real, tensor,
    if (*tensor_data >= minval && *tensor_data <= maxval) {
      const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins);
      h_data[THMin(bin, nbins-1)] += 1;
    }
  );
}

/* Batched histogram: one histogram per row of a 2-d tensor, written into
 * hist of shape (rows, nbins). Range handling matches histc above and the
 * min/max fallback is computed over the WHOLE tensor, not per row. */
void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue)
{
  THArgCheck(THTensor_(nDimension)(tensor) < 3, 2, "invalid dimension %d, the input must be a 2d tensor", THTensor_(nDimension)(tensor));

  int dimension = 1;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(tensor), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  real minval;
  real maxval;
  real *h_data;

  THTensor_(resize2d)(hist, tensor->size[0], nbins);
  THTensor_(zero)(hist);

  minval = minvalue;
  maxval = maxvalue;
  if (minval == maxval)
  {
    minval = THTensor_(minall)(tensor);
    maxval = THTensor_(maxall)(tensor);
  }
  if (minval == maxval)
  {
    minval = minval - 1;
    maxval = maxval + 1;
  }

  TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension,
                       int64_t i;
                       for(i = 0; i < tensor_size; i++)
                       {
                         if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) {
                           const int bin = (int)((tensor_data[i*tensor_stride]-minval) / (maxval-minval) * nbins);
                           hist_data[THMin(bin, nbins-1)] += 1;
                         }
                       }
  );
}

#endif /* floating point only part */
#undef IS_NONZERO
#endif
flip_compute.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <stdint.h> #include <vector> #include "lite/core/kernel.h" namespace paddle { namespace lite { namespace kernels { namespace host { DDimLite stride_flip(const DDimLite& ddim) { std::vector<int64_t> tmp(ddim.size(), 0); DDimLite strides(tmp); strides[ddim.size() - 1] = 1; for (int i = ddim.size() - 2; i >= 0; --i) { strides[i] = strides[i + 1] * ddim[i + 1]; } return strides; } template <typename T> class FlipCompute : public KernelLite<TARGET(kHost), PRECISION(kAny)> { public: using param_t = operators::FcParam; void Run() { auto& param = this->Param<operators::FlipParam>(); auto x = param.X; auto out = param.Out; auto flip_dims = param.axis; auto x_dims = x->dims(); const int total_dims = x_dims.size(); std::vector<bool> dim_bitset(64); for (size_t i = 0; i < flip_dims.size(); ++i) { int dim = flip_dims[i]; if (flip_dims[i] < 0) { dim += total_dims; } dim_bitset[dim] = true; } auto x_strides = stride_flip(x_dims); auto numel = x->numel(); const T* x_data = x->template data<T>(); T* out_data = out->template mutable_data<T>(); #pragma omp parallel for for (int64_t i = 0; i < numel; ++i) { int64_t cur_indices = i; int64_t rem = 0; int64_t dst_offset = 0; for (int d = 0; d < total_dims; ++d) { int64_t temp = cur_indices; cur_indices = cur_indices / x_strides[d]; rem = temp - cur_indices * x_strides[d]; dst_offset += dim_bitset[d] ? 
(x_dims[d] - 1 - cur_indices) * x_strides[d] : cur_indices * x_strides[d]; cur_indices = rem; } out_data[i] = x_data[dst_offset]; } } ~FlipCompute() = default; }; } // namespace host } // namespace kernels } // namespace lite } // namespace paddle
server.c
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <strings.h> #include <unistd.h> #include <netdb.h> #include <errno.h> #include <sys/socket.h> #include <sys/select.h> #include <netinet/in.h> #include <arpa/inet.h> #include <omp.h> #include "server.h" static inline int set_socket(struct sockaddr_in * socket_address, int * s, int port, int type) { /* criação da estrutura de dados de endereço */ bzero((char *)socket_address, sizeof(*socket_address)); socket_address->sin_family = AF_INET; socket_address->sin_addr.s_addr = htonl(INADDR_ANY); socket_address->sin_port = htons(port); /* criação de socket passivo */ if ((*s = socket(AF_INET, type, 0)) == -1) { perror("ERROR: Unable to create socket"); exit(errno); } /* Associar socket ao descritor */ if (bind (*s, (struct sockaddr *)socket_address, sizeof(*socket_address))) { perror("ERROR: Unable to bind socket"); exit(errno); } return 0; } static inline void tcp_close_connection(int s) { /* Fecha socket da conexão */ if (close (s) == -1) { perror("ERROR: unable to close client socket"); exit(errno); } } static inline void tcp_handle_new_connection(int s) { struct sockaddr_in client_address; socklen_t client_socklen = sizeof(client_address); char * ip; /* Obtem o IP e Porta do cliente */ if (getpeername(s, (struct sockaddr *) &client_address, &client_socklen) == 0) { printf("CLIENT CONNECTED\n"); ip = inet_ntoa(client_address.sin_addr); printf(" IP: %s\n", ip); printf("PORT: %d\n\n", ntohs(client_address.sin_port)); } else { perror("ERROR: Could not resolve remote port and ip values\n"); exit(errno); } } static inline int tcp_handle_data(int s, void (*app)(char *, char*)) { struct sockaddr_in client_address; socklen_t client_socklen = sizeof(client_address); char * ip; char bufin[MAX_LINE] = {0}, bufout[MAX_LINE] = {0}; int has_data, cport; /* Recebe dado do cliente */ if ((has_data = recv(s, bufin, MAX_LINE, 0)) == -1) { perror("ERROR: unable to receive data"); exit(errno); } if (!has_data) { 
tcp_close_connection(s); return 0; } if (getpeername(s, (struct sockaddr *) &client_address, &client_socklen) == 0) { ip = inet_ntoa(client_address.sin_addr); cport = ntohs(client_address.sin_port); } else { perror("ERROR: Could not resolve remote ip value\n"); exit(errno); } /* Mostra o dado enviado */ printf("TCP From %s:%d %s\n", ip, cport, bufin); app(bufout, bufin); /* Envia eco */ if (send(s, bufout, MAX_LINE, 0) == -1) { perror("ERROR: unable to send data"); exit(errno); } return has_data; } static inline int tcp_handler(int port, void (*app)(char *, char*)) { struct sockaddr_in socket; int s, new_s; int i; int clients[FD_SETSIZE-1] = {0}; int max_fd; fd_set sockets; set_socket(&socket, &s, port, SOCK_STREAM); if (listen(s, MAX_PENDING)) { perror("ERROR: Unable to invoke listen method"); exit(errno); } while (1) { FD_ZERO(&sockets); FD_SET(s, &sockets); max_fd = s; for (i = 0; i < FD_SETSIZE-1; i++) { if (clients[i] > 0) { FD_SET(clients[i], &sockets); if (clients[i] > max_fd) max_fd = clients[i]; } } if (select(max_fd + 1, &sockets, NULL, NULL, NULL) < 0) { if (errno == EINTR) { continue; } perror("ERROR: select() failed"); exit(errno); } // caso haja nova conexão esperando if (FD_ISSET(s, &sockets)) { /* aguardar/aceita conexão, receber e imprimir texto na tela, enviar eco */ if ((new_s = accept(s, (struct sockaddr *)NULL, NULL)) == -1) { perror("ERROR: Unable to get client socket"); exit(errno); } for (i = 0; i < FD_SETSIZE-1; i++){ if (clients[i] == 0) { clients[i] = new_s; break; } } if (i == FD_SETSIZE-1) { printf("ERROR: maximum clients reached, closing new connection\n"); tcp_close_connection(new_s); } /* Chama a função que lida com a nova conexão */ tcp_handle_new_connection(new_s); } // procurar sockets prontos pra leitura for (i = 0; i < FD_SETSIZE-1; i++) { if (clients[i] > 0 && FD_ISSET(clients[i], &sockets)) { // lidar com dados, e remover socket se necessário if (tcp_handle_data(clients[i], app) == 0) { clients[i] = 0; } } } } /* Fecha o 
socket do servidor */ if (close(s) == -1) { perror("ERROR: unable to close socket"); exit(errno); } return 0; } static inline int udp_handler(int port, void (*app)(char *, char *)) { struct sockaddr_in socket, remote; int s, has_data; char bufin[MAX_LINE], bufout[MAX_LINE]; socklen_t len = sizeof(remote); set_socket(&socket, &s, port, SOCK_DGRAM); //printf("Server UDP port: %d\n",ntohs(socket_address.sin_port)); while (1) { memset(bufin, '\0', MAX_LINE); memset(bufout, '\0', MAX_LINE); has_data = recvfrom(s, bufin, MAX_LINE, 0, (struct sockaddr *)&remote, &len); if (!has_data) printf("ERROR: Unable to receive data\n"); else { printf("UDP From %s:%d %s\n", inet_ntoa(remote.sin_addr), ntohs(remote.sin_port), bufin); app(bufout, bufin); sendto(s, bufout, has_data, 0, (struct sockaddr *) &remote, len); } } if (close(s) == -1) { perror("ERROR: unable to close socket"); exit(errno); } return 0; } int server(int sec_tcp_p, int sec_udp_p, int ec_tcp_p, int ec_udp_p, void (*sec)(char *, char *), void (*ent_con)(char *, char *)) { int tid; #pragma omp parallel private(tid) shared(sec_udp_p, sec_tcp_p, ec_tcp_p, ec_udp_p, sec, ent_con) num_threads(4) { tid = omp_get_thread_num(); // Security - TCP if (tid == 0) { tcp_handler(sec_tcp_p, sec); // Security - UDP } else if (tid == 1) { udp_handler(sec_udp_p, sec); // Entertainment / Confort - TCP } else if (tid == 2) { tcp_handler(ec_tcp_p, ent_con); // Entertainment / Confort - UDP } else { udp_handler(ec_udp_p, ent_con); } } return 0; }
a.20.2.c
/* { dg-do compile } */ void a20 () { int a = 1; #pragma omp parallel { if (a != 0) { #pragma omp flush(a) } if (a != 0) { #pragma omp barrier } } }
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive 3x3 stride-1 int8 convolution: for each output channel, accumulate
// the 3x3 dot product of every input channel into an int32 output map.
static void conv3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 weights per (output channel, input channel) pair.
        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);
            // Three consecutive input rows feeding one output row.
            const signed char *r0 = img0;
            const signed char *r1 = img0 + w;
            const signed char *r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }

                // Skip the 2-pixel border (w = outw + 2 for a 3x3 kernel).
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            kernel0 += 9;
        }
    }
}

// Winograd F(2,3) kernel transform: U = G * g * G^T for each 3x3 int8
// kernel, stored as 4x4 int16 tiles. G here is scaled by 2 (hence the
// ">> 2" in the output transform below).
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4*4, inch, outch, 2ul);

    // G
    const short ktm[4][3] = {
        { 2, 0, 0},
        { 1, 1, 1},
        { 1, -1, 1},
        { 0, 0, 2}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g (4x3 intermediate)
            short tmp[4][3];
            for (int i=0; i<4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T (4x4 result)
            for (int j=0; j<4; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i=0; i<4; i++)
                {
                    kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}

// Winograd F(2,3) 3x3 stride-1 int8 convolution: pad input to even tiles,
// transform input tiles (B^T d B), do the elementwise dot with transformed
// kernels, inverse-transform (A^T w A) and crop back to the output size.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(4*4, tiles, inch, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f,  0.0f, -1.0f,  0.0f},
        //     {0.0f,  1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  0.00f, 1.0f}
        // };

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // Four consecutive input rows of this tile row (2-overlap).
                const signed char* r0 = img + w * j * 2;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[4],d1[4],d2[4],d3[4];
                    short w0[4],w1[4],w2[4],w3[4];
                    short t0[4],t1[4],t2[4],t3[4];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
                    }
                    // U = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n   ] = d0[n];
                        out_tm0[n+ 4] = d1[n];
                        out_tm0[n+ 8] = d2[n];
                        out_tm0[n+12] = d3[n];
                    }

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;

                    out_tm0 += 16;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);

        // Process output channels 4 at a time, then the remainder.
        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p+1);
            Mat out2_tm = top_blob_tm.channel(p+2);
            Mat out3_tm = top_blob_tm.channel(p+3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p+1);
            const Mat kernel2_tm = kernel_tm.channel(p+2);
            const Mat kernel3_tm = kernel_tm.channel(p+3);

            for (int i=0; i<tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);
                int* output1_tm = out1_tm.row<int>(i);
                int* output2_tm = out2_tm.row<int>(i);
                int* output3_tm = out3_tm.row<int>(i);

                int sum0[16] = {0};
                int sum1[16] = {0};
                int sum2[16] = {0};
                int sum3[16] = {0};

                int q = 0;
                // Unroll input channels by 4. The kX pointers step by one
                // 16-element row per input channel (+= 16), then rewind
                // (-= 16 * 3) so the loop-carried q+=4 lines up again.
                for (; q+3<inch; q+=4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r3[n] * k0[n];
                        k0 -= 16 * 3;

                        sum1[n] += (int)r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r3[n] * k1[n];
                        k1 -= 16 * 3;

                        sum2[n] += (int)r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r3[n] * k2[n];
                        k2 -= 16 * 3;

                        sum3[n] += (int)r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }

                // Remaining input channels one at a time.
                for (; q<inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum1[n] += (int)r0[n] * k1[n];
                        sum2[n] += (int)r0[n] * k2[n];
                        sum3[n] += (int)r0[n] * k3[n];
                    }
                }

                for (int n=0; n<16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i=0; i<tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[16] = {0};

                int q = 0;
                for (; q+3<inch; q+=4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel0_tm.row<short>(q+1);
                    const short* k2 = kernel0_tm.row<short>(q+2);
                    const short* k3 = kernel0_tm.row<short>(q+3);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum0[n] += (int)r1[n] * k1[n];
                        sum0[n] += (int)r2[n] * k2[n];
                        sum0[n] += (int)r3[n] * k3[n];
                    }
                }

                for (; q<inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n=0; n<16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f,  1.0f,  1.0f,  0.0f},
        //     {0.0f,  1.0f, -1.0f,  1.0f}
        // };

        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j=0; j<nColBlocks; j++)
            {
                // Each 4x4 tile yields a 2x2 output block.
                int* outRow0 = out.row<int>(j*2);
                int* outRow1 = out.row<int>(j*2+1);

                for(int i=0; i<nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j*nRowBlocks + i);

                    int s0[4],s1[4],s2[4],s3[4];
                    int w0[4],w1[4];
                    int d0[2],d1[2],d2[2],d3[2];
                    int o0[2],o1[2];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 4];
                        s2[n] = out_tile[n+ 8];
                        s3[n] = out_tile[n+12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0];
                        d1[0] = w0[1]; d1[1] = w1[1];
                        d2[0] = w0[2]; d2[1] = w1[2];
                        d3[0] = w0[3]; d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n];
                        o1[n] = d1[n] - d2[n] + d3[n];
                    }
                    // save to top blob tm,why right 2,because the G' = G*2
                    outRow0[0] = o0[0] >> 2;
                    outRow0[1] = o0[1] >> 2;
                    outRow1[0] = o1[0] >> 2;
                    outRow1[1] = o1[1] >> 2;

                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Naive 3x3 stride-2 int8 convolution; same accumulation scheme as the
// stride-1 version, but input pointers advance by 2 per output pixel and
// `tailstep` skips the row remainder plus one full row.
static void conv3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int tailstep = w - 2 * outw + w;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);
            const signed char *r0 = img0;
            const signed char *r1 = img0 + w;
            const signed char *r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}

// 3x3 stride-1 int8 conv with dequantization, delegated to im2col+sgemm.
static void conv3x3s1_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_dequant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}

// 3x3 stride-2 int8 conv with dequantization, delegated to im2col+sgemm.
static void conv3x3s2_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_dequant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}

// 3x3 stride-1 int8 conv with requantization, delegated to im2col+sgemm.
static void conv3x3s1_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_requant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}

// 3x3 stride-2 int8 conv with requantization, delegated to im2col+sgemm.
static void conv3x3s2_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_requant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}
many-microtask-args.c
// RUN: %libomp-compile-and-run #include <stdio.h> int main() { int i1 = 0; int i2 = 1; int i3 = 2; int i4 = 3; int i5 = 4; int i6 = 6; int i7 = 7; int i8 = 8; int i9 = 9; int i10 = 10; int i11 = 11; int i12 = 12; int i13 = 13; int i14 = 14; int i15 = 15; int i16 = 16; int r = 0; #pragma omp parallel for firstprivate(i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15, i16) reduction(+:r) for (int i = 0; i < i16; i++) { r += i + i1 + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + i10 + i11 + i12 + i13 + i14 + i15 + i16; } int rf = 2216; if (r != rf) { fprintf(stderr, "r should be %d but instead equals %d\n", rf, r); return 1; } return 0; }
displacement_residual_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementResidualContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * This class implements a convergence control based on nodal displacement (for penalty contact) * @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementResidualContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementResidualContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementResidualContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename 
BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param RotRatioTolerance Relative tolerance for rotation residual error * @param RotAbsTolerance Absolute tolerance for rotation residual error * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementResidualContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType RotRatioTolerance, const TDataType RotAbsTolerance, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // The displacement residual mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The rotation residual mRotRatioTolerance = RotRatioTolerance; mRotAbsTolerance = RotAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); 
this->AssignSettings(ThisParameters); } //* Copy constructor. DisplacementResidualContactCriteria( DisplacementResidualContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mRotRatioTolerance(rOther.mRotRatioTolerance) ,mRotAbsTolerance(rOther.mRotAbsTolerance) ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm) ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm) { } /// Destructor. ~DisplacementResidualContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something // Initialize TDataType disp_residual_solution_norm = 0.0; IndexType disp_dof_num(0); TDataType rot_residual_solution_norm = 0.0; IndexType rot_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType residual_dof_value = 0.0; // Auxiliar displacement DoF check const std::function<bool(const VariableData&)> check_without_rot = [](const VariableData& rCurrVar) -> bool {return true;}; const std::function<bool(const VariableData&)> check_with_rot = [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || 
(rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));}; const auto* p_check_disp = (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot; // Loop over Dofs #pragma omp parallel for reduction(+:disp_residual_solution_norm,disp_dof_num,rot_residual_solution_norm,rot_dof_num,dof_id,residual_dof_value) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; if (it_dof->IsFree()) { dof_id = it_dof->EquationId(); residual_dof_value = rb[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if ((*p_check_disp)(r_curr_var)) { disp_residual_solution_norm += std::pow(residual_dof_value, 2); ++disp_dof_num; } else { // We will assume is rotation dof KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl; rot_residual_solution_norm += std::pow(residual_dof_value, 2); ++rot_dof_num; } } } mDispCurrentResidualNorm = disp_residual_solution_norm; mRotCurrentResidualNorm = rot_residual_solution_norm; TDataType residual_disp_ratio = 1.0; TDataType residual_rot_ratio = 1.0; // We initialize the solution if (mOptions.IsNot(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) { mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm; residual_disp_ratio = 1.0; if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { mRotInitialResidualNorm = (rot_residual_solution_norm == 0.0) ? 
1.0 : rot_residual_solution_norm; residual_rot_ratio = 1.0; } mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true); } // We calculate the ratio of the displacements residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm; residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm; // We calculate the absolute norms const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num; const TDataType residual_rot_abs = mRotCurrentResidualNorm/rot_dof_num; // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance; } else { r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance; } } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { 
KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl; } } else { KRATOS_INFO("DisplacementResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl; } } } } r_process_info[CONVERGENCE_RATIO] = residual_disp_ratio; r_process_info[RESIDUAL_NORM] = residual_disp_abs; // We check if converged const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance); const bool rot_converged = (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? 
(residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true; if (disp_converged && rot_converged) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. 
(unused) */ void Initialize( ModelPart& rModelPart) override { // Initialize BaseType::mConvergenceCriteriaIsInitialized = true; // Check rotation dof mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart)); // Initialize header ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table.AddColumn("RT RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "displacement_residual_contact_criteria", "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "rotation_residual_relative_tolerance" : 1.0e-4, "rotation_residual_absolute_tolerance" : 1.0e-9 })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "displacement_residual_contact_criteria"; } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { 
BaseType::AssignSettings(ThisParameters); // The displacement residual mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The rotation residual mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble(); mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual TDataType mRotInitialResidualNorm; /// The reference norm of the rotation residual TDataType mRotCurrentResidualNorm; /// The current norm of the rotation residual ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} 
///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementResidualContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4)); } #endif /* KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H */
par_strength.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * *****************************************************************************/ /* following should be in a header file */ #include "_hypre_parcsr_ls.h" #include "hypre_hopscotch_hash.h" /*==========================================================================*/ /*==========================================================================*/ /** Generates strength matrix Notes: \begin{itemize} \item The underlying matrix storage scheme is a hypre_ParCSR matrix. \item The routine returns the following: \begin{itemize} \item S - a ParCSR matrix representing the "strength matrix". This is used in the coarsening and interpolation routines. \end{itemize} \item The graph of the "strength matrix" for A is a subgraph of the graph of A, but requires nonsymmetric storage even if A is symmetric. This is because of the directional nature of the "strengh of dependence" notion (see below). Since we are using nonsymmetric storage for A right now, this is not a problem. If we ever add the ability to store A symmetrically, then we could store the strength graph as floats instead of doubles to save space. \item This routine currently "compresses" the strength matrix. We should consider the possibility of defining this matrix to have the same "nonzero structure" as A. To do this, we could use the same A\_i and A\_j arrays, and would need only define the S\_data array. There are several pros and cons to discuss. 
\end{itemize} Terminology: \begin{itemize} \item Ruge's terminology: A point is "strongly connected to" $j$, or "strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$. \item Here, we retain some of this terminology, but with a more generalized notion of "strength". We also retain the "natural" graph notation for representing the directed graph of a matrix. That is, the nonzero entry $a_ij$ is represented as: i --> j. In the strength matrix, S, the entry $s_ij$ is also graphically denoted as above, and means both of the following: \begin{itemize} \item $i$ "depends on" $j$ with "strength" $s_ij$ \item $j$ "influences" $i$ with "strength" $s_ij$ \end{itemize} \end{itemize} {\bf Input files:} _hypre_parcsr_ls.h @return Error code. @param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSHost(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = 
hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; HYPRE_Int *prefix_sum_workspace; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
*----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, memory_location); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, memory_location); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_diag, memory_location); HYPRE_Int *S_temp_offd_j = NULL; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } S_offd_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_offd, memory_location); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) { col_map_offd_S[i] = col_map_offd_A[i]; } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns 
*-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += 
A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } /* diag >= 0*/ } /* num_functions <= 1 */ jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1; jS_offd += A_offd_i[i + 1] - A_offd_i[i]; /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1); for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } jS_offd -= A_offd_i[i + 1] - A_offd_i[i]; } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale || 
dof_func[i] != dof_func[A_diag_j[jA]]) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } /* diag >= 0 */ } /* num_functions <= 1 */ } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. 
*----------------------------------------------------------------*/
      /* Second pass over this thread's rows: shift each row pointer by the
         prefix-sum offset computed above, then copy the surviving (non -1)
         column indices from the temporary arrays into the final S arrays. */
      for (i = start; i < stop; i++)
      {
         S_diag_i[i] += jS_diag;
         S_offd_i[i] += jS_offd;

         jS = S_diag_i[i];
         for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
         {
            if (S_temp_diag_j[jA] > -1)
            {
               S_diag_j[jS] = S_temp_diag_j[jA];
               jS++;
            }
         }

         jS = S_offd_i[i];
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (S_temp_offd_j[jA] > -1)
            {
               S_offd_j[jS] = S_temp_offd_j[jA];
               jS++;
            }
         }
      } /* for each variable */
   } /* omp parallel */

   /* Finalize S: record nnz counts and install the compressed column
      arrays in place of the uncompressed temporaries. */
   hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables];
   hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables];
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   hypre_CSRMatrixJ(S_offd) = S_offd_j;
   hypre_CSRMatrixMemoryLocation(S_diag) = memory_location;
   hypre_CSRMatrixMemoryLocation(S_offd) = memory_location;
   hypre_ParCSRMatrixCommPkg(S) = NULL; /* S builds its own comm pkg later */

   *S_ptr = S;

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   /* S_temp_*_j were replaced above by the compressed arrays, so the
      uncompressed buffers are released here. */
   hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/* ----------------------------------------------------------------------- */
/* hypre_BoomerAMGCreateS: dispatch wrapper that builds the strength
 * matrix S from A.  In CUDA/HIP builds it selects the device
 * implementation when A's diag block lives in device memory, and the
 * host implementation otherwise.  The result is returned via S_ptr;
 * the return value is an error code. */
HYPRE_Int
hypre_BoomerAMGCreateS(hypre_ParCSRMatrix    *A,
                       HYPRE_Real             strength_threshold,
                       HYPRE_Real             max_row_sum,
                       HYPRE_Int              num_functions,
                       HYPRE_Int             *dof_func,
                       hypre_ParCSRMatrix   **S_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("CreateS");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* Execution policy is decided by where A's diag block is stored. */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGCreateSDevice(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGCreateSHost(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/* ----------------------------------------------------------------------- */
/* Create Strength matrix from CF marker array data. Provides a more
   general form to build S for specific nodes of the 'global' matrix
   (for example, F points or A_FF part), given the entire matrix.
   These nodes have the SMRK tag. Could possibly be merged with
   BoomerAMGCreateS() to yield a more general function. */

HYPRE_Int
hypre_BoomerAMGCreateSFromCFMarker(hypre_ParCSRMatrix    *A,
                                   HYPRE_Real             strength_threshold,
                                   HYPRE_Real             max_row_sum,
                                   HYPRE_Int             *CF_marker,
                                   HYPRE_Int              num_functions,
                                   HYPRE_Int             *dof_func,
                                   HYPRE_Int              SMRK,
                                   hypre_ParCSRMatrix   **S_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime();
#endif

   MPI_Comm                comm            = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg        = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix        *A_diag          = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int              *A_diag_i        = hypre_CSRMatrixI(A_diag);
   HYPRE_Real             *A_diag_data     = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix        *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int              *A_offd_i        = hypre_CSRMatrixI(A_offd);
   HYPRE_Real             *A_offd_data     = NULL;
   HYPRE_Int              *A_diag_j        = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int              *A_offd_j        = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt           *row_starts      = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int               num_variables   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt            global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int               num_nonzeros_diag;
   HYPRE_Int               num_nonzeros_offd = 0;
   HYPRE_Int               num_cols_offd = 0;
   hypre_ParCSRMatrix     *S;
   hypre_CSRMatrix        *S_diag;
   HYPRE_Int              *S_diag_i;
   HYPRE_Int              *S_diag_j;
   /* HYPRE_Real          *S_diag_data; */
   hypre_CSRMatrix        *S_offd;
   HYPRE_Int              *S_offd_i = NULL;
   HYPRE_Int              *S_offd_j = NULL;
   /* HYPRE_Real          *S_offd_data; */
   HYPRE_Int              *dof_func_offd = NULL;
   HYPRE_Real              diag, row_scale, row_sum;
   HYPRE_Int               i, jj, jA, jS;
   HYPRE_Int               num_sends, start, j, index;
   HYPRE_Int              *int_buf_data;
HYPRE_Int               ierr = 0;
   HYPRE_Int              *CF_marker_offd = NULL;
   HYPRE_Int              *prefix_sum_workspace;
   HYPRE_Int               my_id;

   /*--------------------------------------------------------------
    * Compute a ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   hypre_MPI_Comm_rank(comm, &my_id);

   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];

   /* S is created with the same sizes as A; entries are pruned below
      and the column arrays are compressed at the end. */
   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
   /* row_starts is owned by A, col_starts = row_starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S,0);
   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);

   S_diag_i = hypre_CSRMatrixI(S_diag);
   /* S_temp_*_j alias the (uncompressed) J arrays of S; a value of -1
      marks a weak/rejected connection.  The compressed result goes into
      the separately allocated S_diag_j / S_offd_j. */
   HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);

   S_diag_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   HYPRE_Int *S_temp_offd_j = NULL;

   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      S_temp_offd_j = hypre_CSRMatrixJ(S_offd);

      HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
      if (num_functions > 1)
      {
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }

      S_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);

      /* S shares A's off-diagonal column map (copied, not aliased). */
      HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols_offd; i++)
      {
         col_map_offd_S[i] = col_map_offd_A[i];
      }
   }

   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST);

   /* give S same nonzero structure as A */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS)
#endif
   {
      HYPRE_Int start, stop;
      hypre_GetSimpleThreadPartition(&start, &stop, num_variables);
      /* per-thread strong-entry counters; turned into global offsets by
         hypre_prefix_sum_pair after the loop */
      HYPRE_Int jS_diag = 0, jS_offd = 0;

      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] == SMRK)
         {
            S_diag_i[i] = jS_diag;
            if (num_cols_offd)
            {
               S_offd_i[i] = jS_offd;
            }

            diag = A_diag_data[A_diag_i[i]];

            /* compute scaling factor and row sum, restricted to SMRK
               neighbors (and matching dof_func when num_functions > 1) */
            row_scale = 0.0;
            row_sum = diag;
            if (num_functions > 1)
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     jj = A_diag_j[jA];
                     if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj]))
                     { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     jj = A_offd_j[jA];
                     if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[jj]))
                     { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; }
                  }
               } /* diag < 0 */
               else
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     jj = A_diag_j[jA];
                     if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj]))
                     { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     jj = A_offd_j[jA];
                     /* dof_func_offd[A_offd_j[jA]] is the same element as
                        dof_func_offd[jj]; kept as written */
                     if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[A_offd_j[jA]]))
                     { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; }
                  }
               } /* diag >= 0 */
            } /* num_functions > 1 */
            else
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     jj = A_diag_j[jA];
                     if (CF_marker[jj] == SMRK)
                     { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     jj = A_offd_j[jA];
                     if (CF_marker_offd[jj] == SMRK)
                     { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; }
                  }
               } /* diag < 0 */
               else
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     jj = A_diag_j[jA];
                     if (CF_marker[jj] == SMRK)
                     { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     jj = A_offd_j[jA];
                     if (CF_marker_offd[jj] == SMRK)
                     { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; }
                  }
               } /* diag >= 0*/
            } /* num_functions <=1 */

            /* compute row entries of S (the diagonal is always rejected) */
            S_temp_diag_j[A_diag_i[i]] = -1;
            if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
            {
               /* make all dependencies weak */
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; }
            }
            else
            {
               if (num_functions > 1)
               {
                  if (diag < 0)
                  {
                     for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                     {
                        jj = A_diag_j[jA];
                        if (CF_marker[jj] == SMRK)
                        {
                           if ((A_diag_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj]))
                           { S_temp_diag_j[jA] = -1; }
                           else
                           { S_temp_diag_j[jA] = jj; ++jS_diag; }
                        }
                        else { S_temp_diag_j[jA] = -1; }
                     }
                     for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                     {
                        jj = A_offd_j[jA];
                        if (CF_marker_offd[jj] == SMRK)
                        {
                           if ((A_offd_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj]))
                           { S_temp_offd_j[jA] = -1; }
                           else
                           { S_temp_offd_j[jA] = jj; ++jS_offd; }
                        }
                        else { S_temp_offd_j[jA] = -1; }
                     }
                  } /* end diag < 0 */
                  else
                  {
                     for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                     {
                        jj = A_diag_j[jA];
                        if (CF_marker[jj] == SMRK)
                        {
                           if ((A_diag_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj]))
                           { S_temp_diag_j[jA] = -1; }
                           else
                           { S_temp_diag_j[jA] = jj; ++jS_diag; }
                        }
                        else { S_temp_diag_j[jA] = -1; }
                     }
                     for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                     {
                        jj = A_offd_j[jA];
                        if (CF_marker_offd[jj] == SMRK)
                        {
                           if ((A_offd_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj]))
                           { S_temp_offd_j[jA] = -1; }
                           else
                           { S_temp_offd_j[jA] = jj; ++jS_offd; }
                        }
                        else { S_temp_offd_j[jA] = -1; }
                     }
                  } /* diag >= 0 */
               } /* num_functions > 1 */
               else
               {
                  if (diag < 0)
                  {
                     for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                     {
                        jj = A_diag_j[jA];
                        if (CF_marker[jj] == SMRK)
                        {
                           if (A_diag_data[jA] <= strength_threshold * row_scale)
                           { S_temp_diag_j[jA] = -1; }
                           else
                           { S_temp_diag_j[jA] = jj; ++jS_diag; }
                        }
                        else { S_temp_diag_j[jA] = -1; }
                     }
                     for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                     {
                        jj = A_offd_j[jA];
                        if (CF_marker_offd[jj] == SMRK)
                        {
                           if (A_offd_data[jA] <= strength_threshold * row_scale)
                           { S_temp_offd_j[jA] = -1; }
                           else
                           { S_temp_offd_j[jA] = jj; ++jS_offd; }
                        }
                        else { S_temp_offd_j[jA] = -1; }
                     }
                  } /* diag < 0 */
                  else
                  {
                     for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                     {
                        jj = A_diag_j[jA];
                        if (CF_marker[jj] == SMRK)
                        {
                           if (A_diag_data[jA] >= strength_threshold * row_scale)
                           { S_temp_diag_j[jA] = -1; }
                           else
                           { S_temp_diag_j[jA] = jj; ++jS_diag; }
                        }
                        else { S_temp_diag_j[jA] = -1; }
                     }
                     for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                     {
                        jj = A_offd_j[jA];
                        if (CF_marker_offd[jj] == SMRK)
                        {
                           if (A_offd_data[jA] >= strength_threshold * row_scale)
                           { S_temp_offd_j[jA] = -1; }
                           else
                           { S_temp_offd_j[jA] = jj; ++jS_offd; }
                        }
                        else { S_temp_offd_j[jA] = -1; }
                     }
                  } /* diag >= 0 */
               } /* num_functions <=1 */
            } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */
         } /* CF_marker == SMRK */
         else
         {
            /* rows not tagged SMRK contribute no strong connections */
            S_diag_i[i] = jS_diag;
            if (num_cols_offd)
            {
               S_offd_i[i] = jS_offd;
            }
            for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; }
         } /* CF_marker != SMRK */
      } /* for each variable */

      /* turn the per-thread counters into global offsets and store the
         totals in S_*_i[num_variables] */
      hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace);

      /*--------------------------------------------------------------
       * "Compress" the strength matrix.
       *
       * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor!
       *
       * NOTE: This "compression" section of code may be removed, and
       * coarsening will still be done correctly. However, the routine
       * that builds interpolation would have to be modified first.
       *----------------------------------------------------------------*/
      for (i = start; i < stop; i++)
      {
         S_diag_i[i] += jS_diag;
         S_offd_i[i] += jS_offd;

         jS = S_diag_i[i];
         for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
         {
            if (S_temp_diag_j[jA] > -1)
            {
               S_diag_j[jS] = S_temp_diag_j[jA];
               jS++;
            }
         }

         jS = S_offd_i[i];
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (S_temp_offd_j[jA] > -1)
            {
               S_offd_j[jS] = S_temp_offd_j[jA];
               jS++;
            }
         }
      } /* for each variable */
   } /* omp parallel */

   /* Finalize S: this variant always keeps S on the host. */
   hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables];
   hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables];
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   hypre_CSRMatrixJ(S_offd) = S_offd_j;
   hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;
   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr = S;

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/*==========================================================================*/
/*==========================================================================*/
/**
  Generates strength matrix

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
\item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix".  This is
  used in the coarsening and interpolation routines.
  \end{itemize}
  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric.  This is because of the directional nature of the
  "strength of dependence" notion (see below).  Since we are using
  nonsymmetric storage for A right now, this is not a problem.  If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.
  \item This routine currently "compresses" the strength matrix.  We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A.  To do this, we could use the same
  A\_i and A\_j arrays, and would need only define the S\_data array.
  There are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $|a_{ij}| \geq \theta \max_{l \neq j} |a_{il}|$.
  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength".  We also retain the "natural"
  graph notation for representing the directed graph of a matrix.
  That is, the nonzero entry $a_{ij}$ is represented as: i --> j.  In
  the strength matrix, S, the entry $s_{ij}$ is also graphically denoted
  as above, and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_{ij}$
  \item $j$ "influences" $i$ with "strength" $s_{ij}$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.
@param A [IN] coefficient matrix
  @param strength_threshold [IN] threshold parameter used to define strength
  @param max_row_sum [IN] parameter used to modify definition of strength
  for diagonal dominant matrices
  @param S_ptr [OUT] strength matrix

  @see */
/*--------------------------------------------------------------------------*/

/* Absolute-value variant: strength is measured with |a_ij|, so sign of
   the diagonal plays no role (unlike hypre_BoomerAMGCreateS). */
HYPRE_Int
hypre_BoomerAMGCreateSabs(hypre_ParCSRMatrix    *A,
                          HYPRE_Real             strength_threshold,
                          HYPRE_Real             max_row_sum,
                          HYPRE_Int              num_functions,
                          HYPRE_Int             *dof_func,
                          hypre_ParCSRMatrix   **S_ptr)
{
   MPI_Comm                comm            = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg        = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix        *A_diag          = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int              *A_diag_i        = hypre_CSRMatrixI(A_diag);
   HYPRE_Real             *A_diag_data     = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix        *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int              *A_offd_i        = hypre_CSRMatrixI(A_offd);
   HYPRE_Real             *A_offd_data     = NULL;
   HYPRE_Int              *A_diag_j        = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int              *A_offd_j        = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt           *row_starts      = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int               num_variables   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt            global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int               num_nonzeros_diag;
   HYPRE_Int               num_nonzeros_offd = 0;
   HYPRE_Int               num_cols_offd = 0;
   hypre_ParCSRMatrix     *S;
   hypre_CSRMatrix        *S_diag;
   HYPRE_Int              *S_diag_i;
   HYPRE_Int              *S_diag_j;
   /* HYPRE_Real          *S_diag_data; */
   hypre_CSRMatrix        *S_offd;
   HYPRE_Int              *S_offd_i = NULL;
   HYPRE_Int              *S_offd_j = NULL;
   /* HYPRE_Real          *S_offd_data; */
   HYPRE_Real              diag, row_scale, row_sum;
   HYPRE_Int               i, jA, jS;
   HYPRE_Int               ierr = 0;
   HYPRE_Int              *dof_func_offd;
   HYPRE_Int               num_sends;
   HYPRE_Int              *int_buf_data;
   HYPRE_Int               index, start, j;
   HYPRE_MemoryLocation    memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   /*--------------------------------------------------------------
    * Compute a ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];

   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
   /* row_starts is owned by A, col_starts = row_starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S,0);
   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, memory_location);
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, memory_location);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, memory_location);

   S_diag_i = hypre_CSRMatrixI(S_diag);
   S_diag_j = hypre_CSRMatrixJ(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);

   hypre_CSRMatrixMemoryLocation(S_diag) = memory_location;
   hypre_CSRMatrixMemoryLocation(S_offd) = memory_location;

   dof_func_offd = NULL;

   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, memory_location);
      S_offd_j = hypre_CSRMatrixJ(S_offd);
      hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }
   }

   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /* give S same nonzero structure as A */
   hypre_ParCSRMatrixCopy(A,S,0);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_variables; i++)
   {
      diag = A_diag_data[A_diag_i[i]];

      /* compute scaling factor and row sum of absolute values */
      row_scale = 0.0;
      row_sum = fabs(diag);
      if (num_functions > 1)
      {
         for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
         {
            if (dof_func[i] == dof_func[A_diag_j[jA]])
            {
               row_scale = hypre_max(row_scale, fabs(A_diag_data[jA]));
               row_sum += fabs(A_diag_data[jA]);
            }
         }
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
            {
               row_scale = hypre_max(row_scale, fabs(A_offd_data[jA]));
               row_sum += fabs(A_offd_data[jA]);
            }
         }
      }
      else
      {
         for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
         {
            row_scale = hypre_max(row_scale, fabs(A_diag_data[jA]));
            row_sum += fabs(A_diag_data[jA]);
         }
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            row_scale = hypre_max(row_scale, fabs(A_offd_data[jA]));
            row_sum += fabs(A_offd_data[jA]);
         }
      }

      /* compute row entries of S */
      S_diag_j[A_diag_i[i]] = -1; /* reject diag entry */
      /* NOTE: the (2.0 - max_row_sum) form of the row-sum test differs
         from hypre_BoomerAMGCreateS because row_sum here is a sum of
         absolute values. */
      if ( fabs(row_sum) < fabs(diag)*(2.0-max_row_sum) && max_row_sum < 1.0 )
      {
         /* make all dependencies weak */
         for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
         {
            S_diag_j[jA] = -1;
         }
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            S_offd_j[jA] = -1;
         }
      }
      else
      {
         if (num_functions > 1)
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]])
               {
                  S_diag_j[jA] = -1;
               }
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]])
               {
                  S_offd_j[jA] = -1;
               }
            }
         }
         else
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale)
               {
                  S_diag_j[jA] = -1;
               }
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale)
               {
                  S_offd_j[jA] = -1;
               }
            }
         }
      }
   }

   /*--------------------------------------------------------------
    * "Compress" the strength matrix.
    *
    * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor!
    *
    * NOTE: This "compression" section of code may be removed, and
    * coarsening will still be done correctly. However, the routine
    * that builds interpolation would have to be modified first.
    *----------------------------------------------------------------*/

   /* RDF: not sure if able to thread this loop */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
      {
         if (S_diag_j[jA] > -1)
         {
            S_diag_j[jS] = S_diag_j[jA];
            jS++;
         }
      }
   }
   S_diag_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_diag) = jS;

   /* RDF: not sure if able to thread this loop */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
      {
         if (S_offd_j[jA] > -1)
         {
            S_offd_j[jS] = S_offd_j[jA];
            jS++;
         }
      }
   }
   S_offd_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_offd) = jS;
   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr = S;

   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);

   return (ierr);
}

/*--------------------------------------------------------------------------*/
/* Builds a communication package for S out of A's comm pkg, dropping the
   off-processor columns of A that carry no strong connection in S; also
   returns (via col_offd_S_to_A_ptr) the map from S's offd columns back
   to A's offd columns. */
HYPRE_Int
hypre_BoomerAMGCreateSCommPkg(hypre_ParCSRMatrix *A,
                              hypre_ParCSRMatrix *S,
                              HYPRE_Int         **col_offd_S_to_A_ptr)
{
   MPI_Comm                comm              = hypre_ParCSRMatrixComm(A);
   hypre_MPI_Status       *status;
   hypre_MPI_Request      *requests;
   hypre_ParCSRCommPkg    *comm_pkg_A        = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommPkg    *comm_pkg_S;
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix        *A_offd            = hypre_ParCSRMatrixOffd(A);
   HYPRE_BigInt           *col_map_offd_A    = hypre_ParCSRMatrixColMapOffd(A);
   hypre_CSRMatrix        *S_diag            = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix        *S_offd            = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int              *S_offd_i          = hypre_CSRMatrixI(S_offd);
   HYPRE_Int              *S_offd_j          = hypre_CSRMatrixJ(S_offd);
   HYPRE_BigInt           *col_map_offd_S    = hypre_ParCSRMatrixColMapOffd(S);
   HYPRE_Int              *recv_procs_A      = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int              *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int              *send_procs_A      = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int              *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
   HYPRE_Int              *recv_procs_S;
   HYPRE_Int              *recv_vec_starts_S;
   HYPRE_Int              *send_procs_S;
   HYPRE_Int              *send_map_starts_S;
HYPRE_Int              *send_map_elmts_S = NULL;
   HYPRE_BigInt           *big_send_map_elmts_S = NULL;
   HYPRE_Int              *col_offd_S_to_A;
   HYPRE_Int              *S_marker;
   HYPRE_Int              *send_change;
   HYPRE_Int              *recv_change;
   HYPRE_Int               num_variables   = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int               num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int               num_cols_offd_S;
   HYPRE_Int               i, j, jcol;
   HYPRE_Int               proc, cnt, proc_cnt, total_nz;
   HYPRE_BigInt            first_row;
   HYPRE_Int               ierr = 0;
   HYPRE_Int               num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int               num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int               num_sends_S;
   HYPRE_Int               num_recvs_S;
   HYPRE_Int               num_nonzeros;

   num_nonzeros = S_offd_i[num_variables];

   /* S_marker[c] stays -1 for A-offd columns that never appear in S_offd;
      used columns get their new (compressed) S index below. */
   S_marker = NULL;
   if (num_cols_offd_A)
      S_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);

   for (i=0; i < num_cols_offd_A; i++)
      S_marker[i] = -1;

   for (i=0; i < num_nonzeros; i++)
   {
      jcol = S_offd_j[i];
      S_marker[jcol] = 0;
   }

   /* First sweep: assign compressed column indices and count how many of
      A's recv neighbors still send anything for S. */
   proc = 0;
   proc_cnt = 0;
   cnt = 0;
   num_recvs_S = 0;
   for (i=0; i < num_recvs_A; i++)
   {
      for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
      {
         if (!S_marker[j])
         {
            S_marker[j] = cnt;
            cnt++;
            proc = 1;
         }
      }
      if (proc) {num_recvs_S++; proc = 0;}
   }

   num_cols_offd_S = cnt;
   recv_change = NULL;
   recv_procs_S = NULL;
   send_change = NULL;
   /* S's old col map (copied from A at creation) is rebuilt compressed */
   if (col_map_offd_S) hypre_TFree(col_map_offd_S, HYPRE_MEMORY_HOST);
   col_map_offd_S = NULL;
   col_offd_S_to_A = NULL;

   if (num_recvs_A) recv_change = hypre_CTAlloc(HYPRE_Int, num_recvs_A, HYPRE_MEMORY_HOST);
   if (num_sends_A) send_change = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST);
   if (num_recvs_S) recv_procs_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S, HYPRE_MEMORY_HOST);
   recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S+1, HYPRE_MEMORY_HOST);
   if (num_cols_offd_S)
   {
      col_map_offd_S = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST);
      col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST);
   }

   if (num_cols_offd_S < num_cols_offd_A)
   {
      /* Some columns dropped: remap S_offd_j in place to the compressed
         numbering and build the compressed col map / recv lists.
         recv_change[i] records how many entries neighbor i lost. */
      for (i=0; i < num_nonzeros; i++)
      {
         jcol = S_offd_j[i];
         S_offd_j[i] = S_marker[jcol];
      }

      proc = 0;
      proc_cnt = 0;
      cnt = 0;
      recv_vec_starts_S[0] = 0;
      for (i=0; i < num_recvs_A; i++)
      {
         for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
         {
            if (S_marker[j] != -1)
            {
               col_map_offd_S[cnt] = col_map_offd_A[j];
               col_offd_S_to_A[cnt++] = j;
               proc = 1;
            }
         }
         recv_change[i] = j-cnt-recv_vec_starts_A[i]+recv_vec_starts_S[proc_cnt];
         if (proc)
         {
            recv_procs_S[proc_cnt++] = recv_procs_A[i];
            recv_vec_starts_S[proc_cnt] = cnt;
            proc = 0;
         }
      }
   }
   else
   {
      /* No columns dropped: S keeps A's comm layout unchanged. */
      for (i=0; i < num_recvs_A; i++)
      {
         for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
         {
            col_map_offd_S[j] = col_map_offd_A[j];
            col_offd_S_to_A[j] = j;
         }
         recv_procs_S[i] = recv_procs_A[i];
         recv_vec_starts_S[i] = recv_vec_starts_A[i];
      }
      recv_vec_starts_S[num_recvs_A] = recv_vec_starts_A[num_recvs_A];
   }

   /* Tell each send neighbor how many of its entries we dropped
      (recv_change), receiving the symmetric info into send_change. */
   requests = hypre_CTAlloc(hypre_MPI_Request, num_sends_A+num_recvs_A, HYPRE_MEMORY_HOST);
   j=0;
   for (i=0; i < num_sends_A; i++)
      hypre_MPI_Irecv(&send_change[i],1,HYPRE_MPI_INT,send_procs_A[i], 0,comm,&requests[j++]);

   for (i=0; i < num_recvs_A; i++)
      hypre_MPI_Isend(&recv_change[i],1,HYPRE_MPI_INT,recv_procs_A[i], 0,comm,&requests[j++]);

   status = hypre_CTAlloc(hypre_MPI_Status, j, HYPRE_MEMORY_HOST);
   hypre_MPI_Waitall(j,requests,status);
   hypre_TFree(status, HYPRE_MEMORY_HOST);
   hypre_TFree(requests, HYPRE_MEMORY_HOST);

   /* Count surviving send neighbors and total surviving send entries. */
   num_sends_S = 0;
   total_nz = send_map_starts_A[num_sends_A];
   for (i=0; i < num_sends_A; i++)
   {
      if (send_change[i])
      {
         if ((send_map_starts_A[i+1]-send_map_starts_A[i]) > send_change[i])
            num_sends_S++;
      }
      else
         num_sends_S++;
      total_nz -= send_change[i];
   }

   send_procs_S = NULL;
   if (num_sends_S)
      send_procs_S = hypre_CTAlloc(HYPRE_Int, num_sends_S, HYPRE_MEMORY_HOST);
   send_map_starts_S = hypre_CTAlloc(HYPRE_Int, num_sends_S+1, HYPRE_MEMORY_HOST);
   send_map_elmts_S = NULL;
   if (total_nz)
   {
      send_map_elmts_S = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST);
      big_send_map_elmts_S = hypre_CTAlloc(HYPRE_BigInt, total_nz, HYPRE_MEMORY_HOST);
   }

   proc = 0;
   proc_cnt = 0;
   for (i=0; i < num_sends_A; i++)
   {
      cnt = send_map_starts_A[i+1]-send_map_starts_A[i]-send_change[i];
      if (cnt)
      {
         send_procs_S[proc_cnt++] = send_procs_A[i];
         send_map_starts_S[proc_cnt] = send_map_starts_S[proc_cnt-1]+cnt;
      }
   }

   comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_S) = comm;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs_S;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S;
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S;
   hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends_S;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S;

   /* Exchange the global column ids (type 22 = BigInt) so each sender
      learns which of its rows the receivers still reference. */
   comm_handle = hypre_ParCSRCommHandleCreate(22, comm_pkg_S, col_map_offd_S, big_send_map_elmts_S);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   first_row = hypre_ParCSRMatrixFirstRowIndex(A);
   /* NOTE(review): when first_row == 0 the conversion loop is skipped and
      send_map_elmts_S keeps its CTAlloc zeros rather than the received
      indices; also big_send_map_elmts_S is not freed in this view —
      confirm against upstream hypre par_strength.c. */
   if (first_row)
      for (i=0; i < send_map_starts_S[num_sends_S]; i++)
         send_map_elmts_S[i] = (HYPRE_Int)(big_send_map_elmts_S[i]-first_row);

   hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S;
   hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S;
   hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
   hypre_CSRMatrixNumCols(S_offd) = num_cols_offd_S;

   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(send_change, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_change, HYPRE_MEMORY_HOST);

   *col_offd_S_to_A_ptr = col_offd_S_to_A;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCreate2ndS : creates strength matrix on coarse points
 * for second coarsening pass in aggressive coarsening (S*S+2S)
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreate2ndSHost( hypre_ParCSRMatrix  *S,
                               HYPRE_Int           *CF_marker,
                               HYPRE_Int            num_paths,
                               HYPRE_BigInt        *coarse_row_starts,
                               hypre_ParCSRMatrix **C_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] -= hypre_MPI_Wtime();
#endif

   MPI_Comm             comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_diag_S = hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); hypre_ParCSRMatrix *S2; HYPRE_BigInt *col_map_offd_C = NULL; hypre_CSRMatrix *C_diag; /*HYPRE_Int *C_diag_data = NULL;*/ HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j = NULL; hypre_CSRMatrix *C_offd; /*HYPRE_Int *C_offd_data=NULL;*/ HYPRE_Int *C_offd_i; HYPRE_Int *C_offd_j=NULL; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *S_ext_diag_i = NULL; HYPRE_Int *S_ext_diag_j = NULL; HYPRE_Int S_ext_diag_size = 0; HYPRE_Int *S_ext_offd_i = NULL; HYPRE_Int *S_ext_offd_j = NULL; HYPRE_Int S_ext_offd_size = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *S_marker = NULL; HYPRE_Int *S_marker_offd = NULL; //HYPRE_Int *temp = NULL; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *map_S_to_C = NULL; HYPRE_Int num_sends = 0; HYPRE_Int num_recvs = 0; HYPRE_Int *send_map_starts; HYPRE_Int *tmp_send_map_starts = NULL; HYPRE_Int *send_map_elmts; HYPRE_Int *recv_vec_starts; HYPRE_Int *tmp_recv_vec_starts = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_int_buf_data = NULL; HYPRE_BigInt *temp = NULL; HYPRE_Int i, j, k; HYPRE_Int i1, i2, i3; HYPRE_BigInt big_i1; HYPRE_Int jj1, jj2, jrow, j_cnt; /*HYPRE_Int cnt, cnt_offd, cnt_diag;*/ HYPRE_Int num_procs, my_id; HYPRE_Int index; /*HYPRE_Int value;*/ HYPRE_Int num_coarse; HYPRE_Int num_nonzeros; HYPRE_BigInt global_num_coarse; HYPRE_BigInt my_first_cpt, my_last_cpt; HYPRE_Int *S_int_i = NULL; HYPRE_BigInt *S_int_j = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_BigInt 
*S_ext_j = NULL; /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_Int *num_coarse_prefix_sum; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); num_coarse_prefix_sum = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Extract S_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); my_first_cpt = coarse_row_starts[0]; my_last_cpt = coarse_row_starts[1]-1; if (my_id == (num_procs -1)) global_num_coarse = coarse_row_starts[1]; hypre_MPI_Bcast(&global_num_coarse, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (num_cols_offd_S) { CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); } HYPRE_Int *coarse_to_fine = NULL; if (num_cols_diag_S) { fine_to_coarse = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); coarse_to_fine = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); } /*HYPRE_Int num_coarse_prefix_sum[hypre_NumThreads() + 1];*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int num_coarse_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_diag_S); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) num_coarse_private++; } hypre_prefix_sum(&num_coarse_private, &num_coarse, num_coarse_prefix_sum); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = num_coarse_private; coarse_to_fine[num_coarse_private] = i; num_coarse_private++; } else { fine_to_coarse[i] = -1; } } } /* omp parallel */ if (num_procs > 1) { if (!comm_pkg) { 
hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); HYPRE_Int begin = send_map_starts[0]; HYPRE_Int end = send_map_starts[num_sends]; big_int_buf_data = hypre_TAlloc(HYPRE_BigInt, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { big_int_buf_data[index - begin] = (HYPRE_BigInt)fine_to_coarse[send_map_elmts[index]] + my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); int_buf_data = hypre_TAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { int_buf_data[index - begin] = CF_marker[send_map_elmts[index]]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data, HYPRE_MEMORY_HOST); S_int_i = hypre_TAlloc(HYPRE_Int, end+1, HYPRE_MEMORY_HOST); S_ext_i = hypre_CTAlloc(HYPRE_Int, recv_vec_starts[num_recvs]+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * generate S_int_i through adding number of coarse row-elements of offd and diag * for corresponding rows. 
S_int_i[j+1] contains the number of coarse elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ S_int_i[0] = 0; num_nonzeros = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int index = 0; for (k = S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) index++; } for (k = S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) index++; } S_int_i[j - begin + 1] = index; num_nonzeros += S_int_i[j - begin + 1]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,&S_int_i[1],&S_ext_i[1]); if (num_nonzeros) S_int_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = 0; j_cnt = 0; for (i=0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { jrow = send_map_elmts[j]; for (k=S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) S_int_j[j_cnt++] = (HYPRE_BigInt)fine_to_coarse[S_diag_j[k]]+my_first_cpt; } for (k=S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) S_int_j[j_cnt++] = fine_to_coarse_offd[S_offd_j[k]]; } } tmp_send_map_starts[i+1] = j_cnt; } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; 
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange S_ext_i[j+1] contains the number of coarse elements * of a row j ! * evaluate S_ext_i and compute num_nonzeros for S_ext *--------------------------------------------------------------------------*/ for (i=0; i < recv_vec_starts[num_recvs]; i++) S_ext_i[i+1] += S_ext_i[i]; num_nonzeros = S_ext_i[recv_vec_starts[num_recvs]]; if (num_nonzeros) S_ext_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_recv_vec_starts[0] = 0; for (i=0; i < num_recvs; i++) tmp_recv_vec_starts[i+1] = S_ext_i[recv_vec_starts[i+1]]; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; comm_handle = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,S_int_j,S_ext_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(S_int_i, HYPRE_MEMORY_HOST); hypre_TFree(S_int_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_BigInt *S_big_offd_j = NULL; S_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_diag_i[0] = 0; S_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i[0] = 0; hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, S_ext_i[num_cols_offd_S] + num_cols_offd_S, 16*hypre_NumThreads()); #pragma omp parallel private(i,j, big_i1) { HYPRE_Int S_ext_offd_size_private = 0; HYPRE_Int 
S_ext_diag_size_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { hypre_UnorderedBigIntSetPut(&found_set, fine_to_coarse_offd[i]); } for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_ext_offd_size_private++; hypre_UnorderedBigIntSetPut(&found_set, big_i1); } else S_ext_diag_size_private++; } } hypre_prefix_sum_pair( &S_ext_diag_size_private, &S_ext_diag_size, &S_ext_offd_size_private, &S_ext_offd_size, prefix_sum_workspace); #pragma omp master { if (S_ext_diag_size) S_ext_diag_j = hypre_TAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { S_ext_offd_j = hypre_TAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); S_big_offd_j = hypre_TAlloc(HYPRE_BigInt, S_ext_offd_size, HYPRE_MEMORY_HOST); } } #pragma omp barrier for (i = i_begin; i < i_end; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_big_offd_j[S_ext_offd_size_private++] = big_i1; //S_ext_offd_j[S_ext_offd_size_private++] = big_i1; else S_ext_diag_j[S_ext_diag_size_private++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[i + 1] = S_ext_diag_size_private; S_ext_offd_i[i + 1] = S_ext_offd_size_private; } } // omp parallel temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, S_big_offd_j[i]); //S_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, S_ext_offd_j[i]); hypre_TFree(S_ext_j, 
HYPRE_MEMORY_HOST); hypre_TFree(S_big_offd_j, HYPRE_MEMORY_HOST); if (num_cols_offd_C) hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int cnt_offd, cnt_diag, cnt, value; S_ext_diag_size = 0; S_ext_offd_size = 0; for (i=0; i < num_cols_offd_S; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt) S_ext_offd_size++; else S_ext_diag_size++; } } S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); } cnt_offd = 0; cnt_diag = 0; cnt = 0; HYPRE_Int num_coarse_offd = 0; for (i=0; i < num_cols_offd_S; i++) { if (CF_marker_offd[i] > 0) num_coarse_offd++; for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_ext_j[cnt_offd++] = big_i1; else S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[++cnt] = cnt_diag; S_ext_offd_i[cnt] = cnt_offd; } hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); cnt = 0; if (S_ext_offd_size || num_coarse_offd) { temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_coarse_offd, HYPRE_MEMORY_HOST); for (i=0; i < S_ext_offd_size; i++) temp[i] = S_ext_j[i]; cnt = S_ext_offd_size; for (i=0; i < num_cols_offd_S; i++) if (CF_marker_offd[i] > 0) temp[cnt++] = fine_to_coarse_offd[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; if (S_ext_offd_size || 
num_coarse_offd) hypre_TFree(temp, HYPRE_MEMORY_HOST); for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_C, S_ext_j[i], num_cols_offd_C); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_S) { map_S_to_C = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); HYPRE_BigInt cnt = 0; for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { cnt = hypre_BigLowerBound(col_map_offd_C + cnt, col_map_offd_C + num_cols_offd_C, fine_to_coarse_offd[i]) - col_map_offd_C; map_S_to_C[i] = cnt++; } else map_S_to_C[i] = -1; } } /* omp parallel */ } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif } /* num_procs > 1 */ /*----------------------------------------------------------------------- * Allocate and initialize some stuff. 
*-----------------------------------------------------------------------*/ HYPRE_Int *S_marker_array = NULL, *S_marker_offd_array = NULL; if (num_coarse) S_marker_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); if (num_cols_offd_C) S_marker_offd_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); HYPRE_Int *C_temp_offd_j_array = NULL; HYPRE_Int *C_temp_diag_j_array = NULL; HYPRE_Int *C_temp_offd_data_array = NULL; HYPRE_Int *C_temp_diag_data_array = NULL; if (num_paths > 1) { C_temp_diag_j_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_diag_data_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_data_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); } C_diag_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of S *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i1,i2,i3,jj1,jj2,index) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Int i1_begin, i1_end; hypre_GetSimpleThreadPartition(&i1_begin, &i1_end, num_cols_diag_S); HYPRE_Int *C_temp_diag_j = NULL, *C_temp_offd_j = NULL; HYPRE_Int *C_temp_diag_data = NULL, *C_temp_offd_data = NULL; if (num_paths > 1) { C_temp_diag_j = C_temp_diag_j_array + num_coarse*my_thread_num; C_temp_offd_j = C_temp_offd_j_array + num_cols_offd_C*my_thread_num; C_temp_diag_data = C_temp_diag_data_array + num_coarse*my_thread_num; C_temp_offd_data = C_temp_offd_data_array + num_cols_offd_C*my_thread_num; } HYPRE_Int *S_marker = NULL, *S_marker_offd = NULL; if (num_coarse) S_marker = 
S_marker_array + num_coarse*my_thread_num; if (num_cols_offd_C) S_marker_offd = S_marker_offd_array + num_cols_offd_C*my_thread_num; for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } // These two counters are for before filtering by num_paths HYPRE_Int jj_count_diag = 0; HYPRE_Int jj_count_offd = 0; // These two counters are for after filtering by num_paths HYPRE_Int num_nonzeros_diag = 0; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int ic_begin = num_coarse_prefix_sum[my_thread_num]; HYPRE_Int ic_end = num_coarse_prefix_sum[my_thread_num + 1]; HYPRE_Int ic; if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if 
(S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - 
jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { ++num_nonzeros_diag; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { ++num_nonzeros_offd; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ hypre_prefix_sum_pair( &num_nonzeros_diag, &C_diag_i[num_coarse], &num_nonzeros_offd, &C_offd_i[num_coarse], prefix_sum_workspace); for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { if (C_diag_i[num_coarse]) { C_diag_j = hypre_TAlloc(HYPRE_Int, C_diag_i[num_coarse], HYPRE_MEMORY_HOST); } if 
(C_offd_i[num_coarse]) { C_offd_j = hypre_TAlloc(HYPRE_Int, C_offd_i[num_coarse], HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ic_begin; ic < ic_end - 1; ic++) { if (C_diag_i[ic+1] == C_diag_i[ic] && C_offd_i[ic+1] == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; } if (ic_begin < ic_end) { C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; HYPRE_Int next_C_diag_i = prefix_sum_workspace[2*(my_thread_num + 1)]; HYPRE_Int next_C_offd_i = prefix_sum_workspace[2*(my_thread_num + 1) + 1]; if (next_C_diag_i == C_diag_i[ic] && next_C_offd_i == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; } if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < 
S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = i3; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = i3; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { jj_count_diag = num_nonzeros_diag; jj_count_offd = num_nonzeros_offd; for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if 
(S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = i3; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = i3; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { C_diag_j[num_nonzeros_diag++] = C_temp_diag_j[jj1 - jj_row_begin_diag]; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { C_offd_j[num_nonzeros_offd++] = C_temp_offd_j[jj1 - jj_row_begin_offd]; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ } /* omp parallel */ S2 = hypre_ParCSRMatrixCreate(comm, global_num_coarse, global_num_coarse, coarse_row_starts, coarse_row_starts, num_cols_offd_C, C_diag_i[num_coarse], C_offd_i[num_coarse]); hypre_ParCSRMatrixOwnsRowStarts(S2) = 0; C_diag = hypre_ParCSRMatrixDiag(S2); hypre_CSRMatrixI(C_diag) = C_diag_i; if (C_diag_i[num_coarse]) hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = hypre_ParCSRMatrixOffd(S2); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(S2) = C_offd; if (num_cols_offd_C) { if (C_offd_i[num_coarse]) hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(S2) = col_map_offd_C; } /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(C_temp_diag_j_array, 
HYPRE_MEMORY_HOST);
   /* Release the per-thread scratch arrays used during the symbolic product. */
   hypre_TFree(C_temp_diag_data_array, HYPRE_MEMORY_HOST);
   hypre_TFree(C_temp_offd_j_array, HYPRE_MEMORY_HOST);
   hypre_TFree(C_temp_offd_data_array, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_array, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_offd_array, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_to_fine, HYPRE_MEMORY_HOST);
   if (S_ext_diag_size)
   {
      hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST);
   if (S_ext_offd_size)
   {
      hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_S)
   {
      hypre_TFree(map_S_to_C, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   }

   /* The result matrix was assembled in host memory above. */
   hypre_CSRMatrixMemoryLocation(C_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_HOST;

   *C_ptr = S2;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] += hypre_MPI_Wtime();
#endif

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(num_coarse_prefix_sum, HYPRE_MEMORY_HOST);

   return 0;
}

//-----------------------------------------------------------------------

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCreate2ndS
 *
 * Public entry point for building the "second-pass" strength matrix used by
 * aggressive coarsening.  When hypre is built with CUDA/HIP support, it
 * dispatches on the memory location of S's diagonal block: device-resident
 * matrices go to hypre_BoomerAMGCreate2ndSDevice, everything else to the
 * host implementation above.  The result is returned through C_ptr; the
 * callee's return code is propagated unchanged.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreate2ndS( hypre_ParCSRMatrix *S,
                           HYPRE_Int *CF_marker,
                           HYPRE_Int num_paths,
                           HYPRE_BigInt *coarse_row_starts,
                           hypre_ParCSRMatrix **C_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("Create2ndS");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* Choose host vs. device implementation from where S's diag lives. */
   HYPRE_ExecutionPolicy exec =
      hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(S)) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGCreate2ndSDevice( S, CF_marker, num_paths,
                                              coarse_row_starts, C_ptr );
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGCreate2ndSHost( S, CF_marker, num_paths,
                                            coarse_row_starts, C_ptr );
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr. coarsening
 *
 * Walks the old CF marker over num_var variables.  Every old C-point
 * (CF_marker[i] > 0) consumes one slot of new_CF_marker: a point marked
 * exactly 1 takes its new classification from new_CF_marker, while any
 * other positive marker is reset to 1 and its new_CF_marker entry is
 * skipped.  F-points (non-positive markers) are left untouched.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker(HYPRE_Int *CF_marker, HYPRE_Int num_var,
                               HYPRE_Int *new_CF_marker)
{
   HYPRE_Int i, cnt;

   cnt = 0;
   for (i=0; i < num_var; i++)
   {
      if (CF_marker[i] > 0 )
      {
         if (CF_marker[i] == 1)
            CF_marker[i] = new_CF_marker[cnt++];
         else
         {
            /* Positive marker other than 1: force to C-point and still
             * advance past the corresponding new_CF_marker entry. */
            CF_marker[i] = 1;
            cnt++;
         }
      }
   }
   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr. coarsening,
 * but marks new F-points (previous C-points) as -2
 *
 * Same walk as hypre_BoomerAMGCorrectCFMarker, but every old C-point is
 * reclassified from new_CF_marker: an entry of -1 demotes it to -2 (an
 * F-point that used to be a C-point); any other entry keeps it a C-point
 * (value 1).  Exactly one new_CF_marker slot is consumed per old C-point.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker2(HYPRE_Int *CF_marker, HYPRE_Int num_var,
                                HYPRE_Int *new_CF_marker)
{
   HYPRE_Int i, cnt;

   cnt = 0;
   for (i=0; i < num_var; i++)
   {
      if (CF_marker[i] > 0 )
      {
         if (new_CF_marker[cnt] == -1)
            CF_marker[i] = -2;
         else
            CF_marker[i] = 1;
         cnt++;
      }
   }
   return 0;
}
direct.c
/* direct.c — direct-summation N-body integrator (leapfrog) driven through the
 * g5_* API from gp5util.h (presumably the GRAPE-5 accelerator library — TODO
 * confirm).  Reads a snapshot, integrates to endt with fixed dt, reports
 * energy drift and throughput, writes the final snapshot. */
#define ERRORTEST 0
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/resource.h>
#include <sys/time.h>
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#include "gp5util.h"

/* maximum particle count, bounded by the library's JMEMSIZE */
#define NJMAX (JMEMSIZE)
// #define NJMAX 65536

/* read the x86 time-stamp counter into val (unused here; kept for timing) */
#define rdtscll(val) do { \
     unsigned int a,d; \
     asm volatile("rdtsc" : "=a"(a), "=d"(d)); \
     (val) = ((unsigned long)a) | (((unsigned long)d)<<32); \
} while(0)

#if 1
/* assumed CPU clock in GHz, used only to convert wall time to "cycles"
   (sic: "GIGAHELTZ" is a typo for gigahertz, kept for compatibility) */
#define GIGAHELTZ 3.8
/* wall-clock time in seconds (microsecond resolution) */
double get_dtime(void){
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return ((double)(tv.tv_sec) + (double)(tv.tv_usec) * 0.001 * 0.001);
}
#endif

/* defined elsewhere: returns lap and split CPU times */
void get_cputime(double *laptime, double *sprittime);

/* Read a snapshot file: particle count, a dummy dimension line, a dummy
 * time line, then nj masses, nj positions, nj velocities.
 * NOTE(review): fscanf/fprintf return values are captured in fi but never
 * checked, and nj is not validated against NJMAX — confirm inputs are trusted. */
void readnbody(int *nj, double *mj, double (*xj)[3], double (*vj)[3], char *fname)
{
    int i, dummy, fi;
    double dummyd;
    FILE *fp;
    fp = fopen(fname, "r");
    if (fp == NULL) {
        perror("readnbody");
        exit(1);
    }
    fi = fscanf(fp, "%d\n", nj);
    fi = fscanf(fp, "%d\n", &dummy);
    fi = fscanf(fp, "%lf\n", &dummyd);
    fi = fprintf(stderr, "nj: %d\n", *nj);
    for (i = 0; i < *nj; i++) {
        fi = fscanf(fp, "%lf\n", mj+i);
    }
    for (i = 0; i < *nj; i++) {
        fi = fscanf(fp, "%lf %lf %lf\n", xj[i]+0, xj[i]+1, xj[i]+2);
    }
    for (i = 0; i < *nj; i++) {
        fi = fscanf(fp, "%lf %lf %lf\n", vj[i]+0, vj[i]+1, vj[i]+2);
    }
}

/* Write a snapshot in the same layout readnbody expects (dimension fixed to 3,
 * time written as 0.0).  NOTE(review): fopen result is not checked here. */
void writenbody(int nj, double *mj, double (*xj)[3], double (*vj)[3], char *fname)
{
    int i;
    FILE *fp;
    fp = fopen(fname, "w");
    fprintf(fp, "%d\n", nj);
    fprintf(fp, "%d\n", 3);
    fprintf(fp, "%e\n", 0.0);
    for (i = 0; i < nj; i++) {
        fprintf(fp, "%e\n", mj[i]);
    }
    for (i = 0; i < nj; i++) {
        fprintf(fp, "%e %e %e\n", xj[i][0], xj[i][1], xj[i][2]);
    }
    for (i = 0; i < nj; i++) {
        fprintf(fp, "%e %e %e\n", vj[i][0], vj[i][1], vj[i][2]);
    }
}

/* Compute accelerations a[] and potentials p[] for all nj particles with a
 * shared Plummer softening eps, via the g5 library.  Also prints a rough
 * cycles-per-interaction figure derived from wall time and GIGAHELTZ.
 * The library's potential sign is flipped, and each particle's softened
 * self-interaction (mj*1/eps) is removed from p when eps != 0. */
void calc_gravity(double *mj, double (*xj)[3], double (*vj)[3], double eps, double (*a)[3], double *p, int nj)
{
    double epsinv;
    int i;
    double cycle;
    // g5_set_xj(0, nj, xj);
    // g5_set_mj(0, nj, mj);
    g5_set_xmj(0, nj, xj, mj);
    g5_set_eps_to_all(eps);
    g5_set_n(nj);
    double st1 = get_dtime();
    g5_calculate_force_on_x(xj, a, p, nj);
    double st2 = get_dtime();
    /* nj*nj interactions; the /4 presumably reflects a 4-wide SIMD pipeline —
       TODO confirm against the g5 implementation */
    cycle = (double)(st2 - st1) * GIGAHELTZ * 1e9 / ((double)nj*(double)nj/4);
#ifdef ENABLE_OPENMP
#pragma omp parallel
#if 1
    {
        /* scale by thread count exactly once (thread 0 only) */
        if(omp_get_thread_num() == 0) cycle *= omp_get_num_threads();
    }
#else
    cycle *= omp_get_num_threads();
#endif
#endif
    printf("gravity %f cycle per loop\n", cycle);
    for (i = 0; i < nj; i++) {
        p[i] = -p[i];
    }
    if (eps != 0.0) {
        epsinv = 1.0/eps;
        for (i = 0; i < nj; i++) {
            p[i] = p[i] + mj[i] * epsinv;
        }
    }
}

#ifdef SYMMETRIC
/* Per-particle-softening variant of calc_gravity: epsj2[i] holds the squared
 * softening of particle i; self-interaction removal uses
 * 1/sqrt(eps_i^2 + eps_i^2) = 1/(sqrt(2)*eps_i). */
void calc_gravity0(double *mj, double (*xj)[3], double (*vj)[3], double *epsj2, double (*a)[3], double *p, int nj)
{
    double epsinv;
    int i;
    double cycle;
    g5_set_xmj0(0, nj, xj, mj, epsj2);
    g5_set_n(nj);
    double st1 = get_dtime();
    g5_calculate_force_on_x0(xj, a, p, nj, epsj2);
    double st2 = get_dtime();
    cycle = (double)(st2 - st1) * GIGAHELTZ * 1e9 / ((double)nj*(double)nj/4);
#ifdef ENABLE_OPENMP
#pragma omp parallel
#if 1
    {
        if(omp_get_thread_num() == 0) cycle *= omp_get_num_threads();
    }
#else
    cycle *= omp_get_num_threads();
#endif
#endif
    printf("gravity %f cycle per loop\n", cycle);
    for (i = 0; i < nj; i++) {
        p[i] = -p[i];
        if (epsj2[i] != 0.0) {
            epsinv = 1.0 / (sqrt(2.0) * sqrt(epsj2[i]));
            p[i] = p[i] + mj[i] * epsinv;
        }
    }
}
#endif

/* kick: vj += dt * a for all particles */
void push_velocity(double (*vj)[3], double (*a)[3], double dt, int nj)
{
    int j, k;
    for (j = 0; j < nj; j++) {
        for (k = 0; k < 3; k++) {
            vj[j][k] += dt * a[j][k];
        }
    }
}

/* drift: xj += dt * vj for all particles (a is unused) */
void push_position(double (*xj)[3], double (*vj)[3], double (*a)[3], double dt, int nj)
{
    int j, k;
    for (j = 0; j < nj; j++) {
        for (k = 0; k < 3; k++) {
            xj[j][k] += dt * vj[j][k];
        }
    }
}

/* total kinetic (ke) and potential (pe) energy; pe halved to undo the
 * double counting of pairwise interactions */
void energy(double *mj, double (*vj)[3], double *p, int nj, double *ke, double *pe)
{
    int i, k;
    *pe = 0;
    *ke = 0;
    for (i = 0; i < nj; i++) {
        *pe += mj[i] * p[i];
        for (k = 0; k < 3; k++) {
            *ke += 0.5 * mj[i] * vj[i][k] * vj[i][k];
        }
    }
    *pe /= 2.0;
}

/* usage: direct <infile> <outfile>
 * Leapfrog (kick-drift-kick) integration from t=0 to endt=10 with dt=0.01;
 * prints energy drift and throughput every nstep/10 steps. */
int main(int argc, char **argv)
{
    /* static: particle arrays are too large for the stack */
    static double mj[NJMAX], xj[NJMAX][3], vj[NJMAX][3], epsj2[NJMAX];
    static double a[NJMAX][3], p[NJMAX];
    double xmax, xmin, mmin;
    double time;
    // double eps, dt, endt;
    double dt, endt;
    double e, e0, ke, pe;
    double LapTime, SpritTime, IntPerSec, Gflops;
    int nj;
    int nstep, step;
    dt = 0.01;
    endt = 10.0;
    time = 0.0;
    nstep = endt/dt;   /* truncating conversion: 1000 steps */
    /* coordinate range handed to g5_set_range below */
    xmax = 10.0;
    xmin = -10.0;
    if (argc < 3) {
        fprintf(stderr, "usage: %s <infile> <outfile>\n", argv[0]);
        exit(1);
    }
    readnbody(&nj, mj, xj, vj, argv[1]);
    /* NOTE(review): mmin is taken from particle 0, presumably assuming it
       carries the minimum mass — confirm the snapshot convention */
    mmin = mj[0];
#if ERRORTEST == 1
    double eps;
    eps = 4.0 / (double)nj;
#else
#ifdef SYMMETRIC
    /* individual softenings ramp from 0.01 to 0.02 across the particle list */
    int i;
    for(i = 0; i < nj; i++) epsj2[i] = (0.01 + 0.01 * (double)i / (double)nj) * (0.01 + 0.01 * (double)i / (double)nj);
    // mj[1021] = 1.0;
    // mj[1022] = 1.0;
    // mj[1023] = 1.0;
#else
    double eps;
    eps = 0.02;
#endif
#endif
    g5_open();
    g5_set_range(xmin, xmax, mmin);
#ifdef SYMMETRIC
    calc_gravity0(mj, xj, vj, epsj2, a, p, nj);
#else
    calc_gravity(mj, xj, vj, eps, a, p, nj);
#endif
    energy(mj, vj, p, nj, &ke, &pe);
    e0 = ke+pe;   /* reference energy for drift reporting */
#if ERRORTEST == 1
    /* accuracy test mode: dump |a| and p per particle, then quit */
    int i;
    char out[1024];
    FILE *fp;
    sprintf(out, "pl%03dk_eps4n_avx.ap", nj / 1024);
    fp = fopen(out, "w");
    for(i = 0; i < nj; i++) fprintf(fp, "%5d %+.16e %+.16e\n", i, sqrt(a[i][0]*a[i][0]+a[i][1]*a[i][1]+a[i][2]*a[i][2]), p[i]);
    fclose(fp);
    exit(0);
#endif
    // TimeStart = (double)clock() / CLOCKS_PER_SEC;
    get_cputime(&LapTime, &SpritTime);
    for (step = 1; step < nstep; step++) {
        /* kick-drift-kick leapfrog step */
        push_velocity(vj, a, 0.5*dt, nj);
        push_position(xj, vj, a, dt, nj);
        time = time + dt;
#ifdef SYMMETRIC
        calc_gravity0(mj, xj, vj, epsj2, a, p, nj);
#else
        calc_gravity(mj, xj, vj, eps, a, p, nj);
#endif
        push_velocity(vj, a, 0.5*dt, nj);
#ifdef ANIM
        plot_star(xj, nj, time, 0.3, mj, mj[0]);
#endif /* ANIM */
        if (step % (nstep/10) == 0) {
            energy(mj, vj, p, nj, &ke, &pe);
            e = ke+pe;
            // TimeEnd = (double)clock() / CLOCKS_PER_SEC;
            get_cputime(&LapTime, &SpritTime);
            /* nj^2 interactions per step, nstep/10 steps per lap;
               38 flops per interaction is the conventional count */
            IntPerSec = ((double)nj * (double)nj * (long)(nstep/10)) / LapTime;
            Gflops = IntPerSec * 38. * 1.e-9;
            printf("step: %d time: %e\n", step, time);
            printf("e: %e de: %e\n", e, e-e0);
            printf("ke: %e pe: %e\n", ke, pe);
            printf("ke/pe: %e\n\n", ke/pe);
            printf("%e interaction per sec, %f Gflops \n", IntPerSec, Gflops);
            // TimeStart = TimeEnd;
        }
    }
    g5_close();
    writenbody(nj, mj, xj, vj, argv[2]);
    return 0;
}
/* ======== file: GB_binop__bxnor_int8.c ======== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxnor_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxnor_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxnor_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxnor_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxnor_int8)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bxnor_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxnor_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxnor_int8)
// C=scalar+B                       GB (_bind1st__bxnor_int8)
// C=scalar+B'                      GB (_bind1st_tran__bxnor_int8)
// C=A+scalar                       GB (_bind2nd__bxnor_int8)
// C=A'+scalar                      GB (_bind2nd_tran__bxnor_int8)

// C type:   int8_t
// A type:   int8_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = ~((aij) ^ (bij))

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: bitwise XNOR on int8
#define GB_BINOP(z,x,y,i,j) \
    z = ~((x) ^ (y)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXNOR || GxB_NO_INT8 || GxB_NO_BXNOR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BXNOR is none of these, so this kernel is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxnor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxnor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxnor_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxnor_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta scalars are only read for eWiseUnion
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxnor_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxnor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxnor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxnor_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxnor_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = ~((x) ^ (bij)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxnor_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = ~((aij) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ~((x) ^ (aij)) ;                  \
}

GrB_Info GB (_bind1st_tran__bxnor_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use of the macro
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ~((aij) ^ (y)) ;                  \
}

GrB_Info GB (_bind2nd_tran__bxnor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ======== file: GB_selector.c ======== */
//------------------------------------------------------------------------------ // GB_selector: select entries from a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // GB_selector does the work for GB_select and the GxB_*select methods. It // also deletes zombies for GB_wait using the NONZOMBIE operator, and deletes // entries outside a smaller matrix for GxB_*resize. // TODO: GB_selector does not exploit the mask. // If C is NULL on input, A is modified in-place. // Otherwise, C is an uninitialized static header. #include "GB_select.h" #include "GB_ek_slice.h" #include "GB_sel__include.h" #include "GB_scalar.h" #include "GB_transpose.h" #define GB_FREE_WORKSPACE \ { \ GB_FREE_WORK (&Zp, Zp_size) ; \ GB_WERK_POP (Work, int64_t) ; \ GB_WERK_POP (A_ek_slicing, int64_t) ; \ GB_FREE (&Cp, Cp_size) ; \ GB_FREE (&Ch, Ch_size) ; \ GB_FREE (&Ci, Ci_size) ; \ GB_FREE (&Cx, Cx_size) ; \ } #define GB_FREE_ALL \ { \ GB_phbix_free (C) ; \ GB_FREE_WORKSPACE ; \ } GrB_Info GB_selector ( GrB_Matrix C, // output matrix, NULL or static header GB_Opcode opcode, // selector opcode const GB_Operator op, // user operator, NULL for resize/nonzombie const bool flipij, // if true, flip i and j for user operator GrB_Matrix A, // input matrix int64_t ithunk, // (int64_t) Thunk, if Thunk is NULL const GrB_Scalar Thunk, // optional input for select operator GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; ASSERT_OP_OK_OR_NULL (op, "selectop/idxunop for GB_selector", GB0) ; ASSERT_SCALAR_OK_OR_NULL (Thunk, "Thunk for GB_selector", GB0) ; ASSERT (GB_IS_SELECTOP_CODE (opcode) || GB_IS_INDEXUNARYOP_CODE 
(opcode)) ; ASSERT_MATRIX_OK (A, "A input for GB_selector", GB_FLIP (GB0)) ; // positional selector (tril, triu, diag, offdiag, resize, rowindex, ...): // can't be jumbled. nonzombie, entry-valued op, user op: jumbled OK ASSERT (GB_IMPLIES (GB_OPCODE_IS_POSITIONAL (opcode), !GB_JUMBLED (A))) ; ASSERT (C == NULL || (C != NULL && C->static_header)) ; //-------------------------------------------------------------------------- // declare workspace //-------------------------------------------------------------------------- bool in_place_A = (C == NULL) ; // GrB_wait and GB_resize only int64_t *restrict Zp = NULL ; size_t Zp_size = 0 ; GB_WERK_DECLARE (Work, int64_t) ; int64_t *restrict Wfirst = NULL ; int64_t *restrict Wlast = NULL ; int64_t *restrict Cp_kfirst = NULL ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; int64_t avlen = A->vlen ; int64_t avdim = A->vdim ; const bool A_iso = A->iso ; int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ; int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ; int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ; GB_void *restrict Cx = NULL ; size_t Cx_size = 0 ; //-------------------------------------------------------------------------- // get Thunk //-------------------------------------------------------------------------- // The scalar value of Thunk has already been typecasted to an integer // (int64_t ithunk). // It is also now typecast to the same type as A (to the scalar athunk) // which is required for GxB_SelectOps, and to the op->ytype (the scalar // ythunk) for GrB_IndexUnaryOps. // If Thunk is NULL, or has no entry, it is treated as a scalar value // of zero. 
const size_t asize = A->type->size ; const GB_Type_code acode = A->type->code ; GrB_Type ytype = NULL, xtype = NULL ; GB_Type_code ycode = GB_ignore_code, xcode = GB_ignore_code ; size_t ysize = 1, xsize = 1 ; if (op != NULL) { if (op->ytype != NULL) { // get the type of the thunk input of the operator ytype = op->ytype ; ycode = ytype->code ; ysize = ytype->size ; } if (op->xtype != NULL) { // get the type of the A input of the operator xtype = op->xtype ; xcode = xtype->code ; xsize = xtype->size ; } } // athunk = (A->type) Thunk, for selectop thunk comparators only GB_void athunk [GB_VLA(asize)] ; memset (athunk, 0, asize) ; // ythunk = (op->ytype) Thunk, for idxnunop GB_void ythunk [GB_VLA(ysize)] ; memset (ythunk, 0, ysize) ; bool op_is_selectop = GB_IS_SELECTOP_CODE (opcode) ; bool op_is_idxunop = GB_IS_INDEXUNARYOP_CODE (opcode) ; bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ; if (Thunk != NULL) { // Thunk is passed to GB_selector only if it is non-empty ASSERT (GB_nnz ((GrB_Matrix) Thunk) > 0) ; const GB_Type_code tcode = Thunk->type->code ; if (op_is_selectop && opcode != GB_USER_selop_code) { // athunk = (atype) Thunk, for built-in GxB_SelectOps only GB_cast_scalar (athunk, acode, Thunk->x, tcode, asize) ; } if (ytype != NULL) { // ythunk = (op->ytype) Thunk GB_cast_scalar (ythunk, ycode, Thunk->x, tcode, ysize) ; } } //-------------------------------------------------------------------------- // handle iso case for built-in select ops that depend only on the value //-------------------------------------------------------------------------- bool op_is_select_valued = opcode >= GB_NONZERO_selop_code && opcode <= GB_LE_THUNK_selop_code ; bool op_is_idxunop_valued = opcode >= GB_VALUENE_idxunop_code && opcode <= GB_VALUELE_idxunop_code ; if (A_iso && (op_is_select_valued || op_is_idxunop_valued)) { // select op is NONZERO, EQ_ZERO, GT_ZERO, GE_ZERO, LT_ZERO, LE_ZERO, // EQ_THUNK, GT_THUNK, GE_THUNK, LT_THUNK, or LE_THUNK, or the idxunop // VALUE* 
operators. All of these select/idxunop ops depend only on // the value of A(i,j). Since A is iso, either all entries in A will // be copied to C and thus C can be created as a shallow copy of A, or // no entries from A will be copied to C and thus C is an empty matrix. // The select factory is not needed, except to check the iso value via // GB_bitmap_selector. ASSERT (!in_place_A) ; ASSERT (C != NULL && C->static_header) ; // construct a scalar containing the iso scalar of A // xscalar = (op->xtype) A->x for idxunops GB_void xscalar [GB_VLA(xsize)] ; memset (xscalar, 0, xsize) ; struct GB_Scalar_opaque S_header ; GrB_Scalar S ; if (op_is_select_valued) { // wrap the iso-value of A in the scalar S, with no typecasting S = GB_Scalar_wrap (&S_header, A->type, A->x) ; } else { // wrap the iso-value of A in the scalar S, typecasted to xtype // xscalar = (op->xtype) A->x GB_cast_scalar (xscalar, xcode, A->x, acode, asize) ; S = GB_Scalar_wrap (&S_header, xtype, xscalar) ; } S->iso = false ; // but ensure S is not iso ASSERT_SCALAR_OK (S, "iso scalar wrap", GB0) ; // apply the select operator to the iso scalar S GB_OK (GB_bitmap_selector (C, false, opcode, op, false, (GrB_Matrix) S, ithunk, athunk, ythunk, Context)) ; ASSERT_MATRIX_OK (C, "C from iso scalar test", GB0) ; bool C_empty = (GB_nnz (C) == 0) ; GB_phbix_free (C) ; // check if C has 0 or 1 entry if (C_empty) { // C is an empty matrix return (GB_new (&C, true, // static header A->type, avlen, avdim, GB_Ap_calloc, true, GxB_SPARSE + GxB_HYPERSPARSE, GB_Global_hyper_switch_get ( ), 1, Context)) ; } else { // C is a shallow copy of A with all the same entries as A // set C->iso = A->iso OK return (GB_shallow_copy (C, true, A, Context)) ; } } // now if A is iso, the following operators still need to be handled: // GB_TRIL_selop_code : use GB_sel__tril_iso // GB_TRIU_selop_code : use GB_sel__triu_iso // GB_DIAG_selop_code : use GB_sel__diag_iso // GB_OFFDIAG_selop_code : use GB_sel__offdiag_iso // 
GB_NONZOMBIE_selop_code : use GB_sel__nonzombie_iso // GB_USER_selop_code : use GB_sel__user_iso // GB_ROWINDEX_idxunop_code : use GB_sel__rowindex_iso // GB_ROWLE_idxunop_code : use GB_sel__rowle_iso // GB_ROWGT_idxunop_code : use GB_sel__rowle_iso // all other idxunop : use GB_sel__idxunop_iso // column selectors are handled below: // GB_COLINDEX_idxunop_code : // GB_COLLE_idxunop_code : // GB_COLGT_idxunop_code : // Except for GB_USER_selop_code and idxunop, the GB_sel__*_iso methods do // not access the values of A and C, just the pattern. //-------------------------------------------------------------------------- // handle the bitmap/as-if-full case //-------------------------------------------------------------------------- bool use_bitmap_selector ; if (opcode == GB_NONZOMBIE_selop_code || in_place_A) { // GB_bitmap_selector does not support the nonzombie opcode, nor does // it support operating on A in place. For the NONZOMBIE operator, A // will never be bitmap. use_bitmap_selector = false ; } else if (opcode == GB_DIAG_selop_code) { // GB_bitmap_selector supports the DIAG operator, but it is currently // not efficient (GB_bitmap_selector should return a sparse diagonal // matrix, not bitmap). So use the sparse case if A is not bitmap, // since the sparse case below does not support the bitmap case. use_bitmap_selector = GB_IS_BITMAP (A) ; } else { // For bitmap, full, or as-if-full matrices (sparse/hypersparse with // all entries present, not jumbled, no zombies, and no pending // tuples), use the bitmap selector for all other operators (TRIL, // TRIU, OFFDIAG, NONZERO, EQ*, GT*, GE*, LT*, LE*, and user-defined // operators). 
use_bitmap_selector = GB_IS_BITMAP (A) || GB_as_if_full (A) ; } //-------------------------------------------------------------------------- // determine if C is iso for a non-iso A //-------------------------------------------------------------------------- bool C_iso = A_iso || // C iso value is Ax [0] (opcode == GB_EQ_ZERO_selop_code) || // C iso value is zero (opcode == GB_EQ_THUNK_selop_code) || // C iso value is thunk (opcode == GB_NONZERO_selop_code && acode == GB_BOOL_code) ; // C iso value is true if (C_iso) { GB_BURBLE_MATRIX (A, "(iso select) ") ; } //========================================================================== // bitmap/full case //========================================================================== if (use_bitmap_selector) { GB_BURBLE_MATRIX (A, "(bitmap select) ") ; ASSERT (C != NULL && C->static_header) ; return (GB_bitmap_selector (C, C_iso, opcode, op, flipij, A, ithunk, athunk, ythunk, Context)) ; } //========================================================================== // sparse/hypersparse case //========================================================================== //-------------------------------------------------------------------------- // determine the max number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // get A: sparse, hypersparse, or full //-------------------------------------------------------------------------- // the case when A is bitmap is always handled above by GB_bitmap_selector ASSERT (!GB_IS_BITMAP (A)) ; int64_t *restrict Ap = A->p ; size_t Ap_size = A->p_size ; int64_t *restrict Ah = A->h ; int64_t *restrict Ai = A->i ; size_t Ai_size = A->i_size ; GB_void *restrict Ax = (GB_void *) A->x ; size_t Ax_size = A->x_size ; int64_t anvec = A->nvec ; bool A_jumbled = A->jumbled ; bool A_is_hyper = (Ah != NULL) ; 
//========================================================================== // column selector //========================================================================== // The column selectors can be done in a single pass. if (opcode == GB_COLINDEX_idxunop_code || opcode == GB_COLLE_idxunop_code || opcode == GB_COLGT_idxunop_code) { //---------------------------------------------------------------------- // find column j in A //---------------------------------------------------------------------- ASSERT_MATRIX_OK (A, "A for col selector", GB_FLIP (GB0)) ; int nth = nthreads_max ; ASSERT (!in_place_A) ; ASSERT (C != NULL && C->static_header) ; ASSERT (GB_JUMBLED_OK (A)) ; int64_t j = (opcode == GB_COLINDEX_idxunop_code) ? (-ithunk) : ithunk ; int64_t k = 0 ; bool found ; if (j < 0) { // j is outside the range of columns of A k = 0 ; found = false ; } else if (j >= avdim) { // j is outside the range of columns of A k = anvec ; found = false ; } else if (A_is_hyper) { // find the column j in the hyperlist of A int64_t kright = anvec-1 ; GB_SPLIT_BINARY_SEARCH (j, Ah, k, kright, found) ; // if found is true the Ah [k] == j // if found is false, then Ah [0..k-1] < j and Ah [k..anvec-1] > j } else { // j appears as the jth column in A; found is always true k = j ; found = true ; } //---------------------------------------------------------------------- // determine the # of entries and # of vectors in C //---------------------------------------------------------------------- int64_t pstart = Ap [k] ; int64_t pend = found ? Ap [k+1] : pstart ; int64_t ajnz = pend - pstart ; int64_t cnz, cnvec ; int64_t anz = Ap [anvec] ; if (opcode == GB_COLINDEX_idxunop_code) { // COLINDEX: delete column j: C = A (:, [0:j-1 j+1:end]) cnz = anz - ajnz ; cnvec = (A_is_hyper && found) ? (anvec-1) : anvec ; } else if (opcode == GB_COLLE_idxunop_code) { // COLLE: C = A (:, 0:j) cnz = pend ; cnvec = (A_is_hyper) ? (found ? 
(k+1) : k) : anvec ; } else // (opcode == GB_COLGT_idxunop_code) { // COLGT: C = A (:, j+1:end) cnz = anz - pend ; cnvec = anvec - ((A_is_hyper) ? (found ? (k+1) : k) : 0) ; } if (cnz == anz) { // C is the same as A: return it a pure shallow copy return (GB_shallow_copy (C, true, A, Context)) ; } else if (cnz == 0) { // return C as empty return (GB_new (&C, true, // auto (sparse or hyper), static header A->type, avlen, avdim, GB_Ap_calloc, true, GxB_HYPERSPARSE, GB_Global_hyper_switch_get ( ), 1, Context)) ; } //---------------------------------------------------------------------- // allocate C //---------------------------------------------------------------------- int sparsity = (A_is_hyper) ? GxB_HYPERSPARSE : GxB_SPARSE ; GB_OK (GB_new_bix (&C, true, // sparse or hyper (from A), static header A->type, avlen, avdim, GB_Ap_malloc, true, sparsity, false, A->hyper_switch, cnvec, cnz, true, A_iso, Context)) ; ASSERT (info == GrB_SUCCESS) ; int nth2 = GB_nthreads (cnvec, chunk, nth) ; int64_t *restrict Cp = C->p ; int64_t *restrict Ch = C->h ; int64_t *restrict Ci = C->i ; GB_void *restrict Cx = (GB_void *) C->x ; int64_t kk ; //---------------------------------------------------------------------- // construct C //---------------------------------------------------------------------- if (A_iso) { // Cx [0] = Ax [0] memcpy (Cx, Ax, asize) ; } if (opcode == GB_COLINDEX_idxunop_code) { //------------------------------------------------------------------ // COLINDEX: delete the column j //------------------------------------------------------------------ if (A_is_hyper) { ASSERT (found) ; // Cp [0:k-1] = Ap [0:k-1] GB_memcpy (Cp, Ap, k * sizeof (int64_t), nth) ; // Cp [k:cnvec] = Ap [k+1:anvec] - ajnz #pragma omp parallel for num_threads(nth2) for (kk = k ; kk <= cnvec ; kk++) { Cp [kk] = Ap [kk+1] - ajnz ; } // Ch [0:k-1] = Ah [0:k-1] GB_memcpy (Ch, Ah, k * sizeof (int64_t), nth) ; // Ch [k:cnvec-1] = Ah [k+1:anvec-1] GB_memcpy (Ch + k, Ah + (k+1), (cnvec-k) * sizeof 
(int64_t), nth) ; } else { // Cp [0:k] = Ap [0:k] GB_memcpy (Cp, Ap, (k+1) * sizeof (int64_t), nth) ; // Cp [k+1:anvec] = Ap [k+1:anvec] - ajnz #pragma omp parallel for num_threads(nth2) for (kk = k+1 ; kk <= cnvec ; kk++) { Cp [kk] = Ap [kk] - ajnz ; } } // Ci [0:pstart-1] = Ai [0:pstart-1] GB_memcpy (Ci, Ai, pstart * sizeof (int64_t), nth) ; // Ci [pstart:cnz-1] = Ai [pend:anz-1] GB_memcpy (Ci + pstart, Ai + pend, (cnz - pstart) * sizeof (int64_t), nth) ; if (!A_iso) { // Cx [0:pstart-1] = Ax [0:pstart-1] GB_memcpy (Cx, Ax, pstart * asize, nth) ; // Cx [pstart:cnz-1] = Ax [pend:anz-1] GB_memcpy (Cx + pstart * asize, Ax + pend * asize, (cnz - pstart) * asize, nth) ; } } else if (opcode == GB_COLLE_idxunop_code) { //------------------------------------------------------------------ // COLLE: C = A (:, 0:j) //------------------------------------------------------------------ if (A_is_hyper) { // Cp [0:cnvec] = Ap [0:cnvec] GB_memcpy (Cp, Ap, (cnvec+1) * sizeof (int64_t), nth) ; // Ch [0:cnvec-1] = Ah [0:cnvec-1] GB_memcpy (Ch, Ah, (cnvec) * sizeof (int64_t), nth) ; } else { // Cp [0:k+1] = Ap [0:k+1] ASSERT (found) ; GB_memcpy (Cp, Ap, (k+2) * sizeof (int64_t), nth) ; // Cp [k+2:cnvec] = cnz #pragma omp parallel for num_threads(nth2) for (kk = k+2 ; kk <= cnvec ; kk++) { Cp [kk] = cnz ; } } // Ci [0:cnz-1] = Ai [0:cnz-1] GB_memcpy (Ci, Ai, cnz * sizeof (int64_t), nth) ; if (!A_iso) { // Cx [0:cnz-1] = Ax [0:cnz-1] GB_memcpy (Cx, Ax, cnz * asize, nth) ; } } else // (opcode == GB_COLGT_idxunop_code) { //------------------------------------------------------------------ // COLGT: C = A (:, j+1:end) //------------------------------------------------------------------ if (A_is_hyper) { // Cp [0:cnvec] = Ap [k+found:anvec] - pend #pragma omp parallel for num_threads(nth2) for (kk = 0 ; kk <= cnvec ; kk++) { Cp [kk] = Ap [kk + k + found] - pend ; } // Ch [0:cnvec-1] = Ah [k+found:anvec-1] GB_memcpy (Ch, Ah + k + found, cnvec * sizeof (int64_t), nth) ; } else { ASSERT 
(found) ; // Cp [0:k] = 0 GB_memset (Cp, 0, (k+1) * sizeof (int64_t), nth) ; // Cp [k+1:cnvec] = Ap [k+1:cnvec] - pend #pragma omp parallel for num_threads(nth2) for (kk = k+1 ; kk <= cnvec ; kk++) { Cp [kk] = Ap [kk] - pend ; } } // Ci [0:cnz-1] = Ai [pend:anz-1] GB_memcpy (Ci, Ai + pend, cnz * sizeof (int64_t), nth) ; if (!A_iso) { // Cx [0:cnz-1] = Ax [pend:anz-1] GB_memcpy (Cx, Ax + pend * asize, cnz * asize, nth) ; } } //---------------------------------------------------------------------- // finalize the matrix, free workspace, and return result //---------------------------------------------------------------------- C->nvec = cnvec ; C->magic = GB_MAGIC ; C->jumbled = A_jumbled ; // C is jumbled if A is jumbled C->iso = C_iso ; // OK: burble already done above C->nvec_nonempty = GB_nvec_nonempty (C, Context) ; ASSERT_MATRIX_OK (C, "C output for GB_selector (column select)", GB0) ; return (GrB_SUCCESS) ; } //========================================================================== // all other select/idxunop operators //========================================================================== #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_phbix_free (C) ; \ GB_FREE_WORKSPACE ; \ } //-------------------------------------------------------------------------- // allocate the new vector pointers of C //-------------------------------------------------------------------------- int64_t cnz = 0 ; Cp = GB_CALLOC (anvec+1, int64_t, &Cp_size) ; if (Cp == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // slice the entries for each task //-------------------------------------------------------------------------- int A_ntasks, A_nthreads ; double work = 8*anvec + ((opcode == GB_DIAG_selop_code) ? 
0 : GB_nnz_held (A)) ; GB_SLICE_MATRIX_WORK (A, 8, chunk, work) ; //-------------------------------------------------------------------------- // allocate workspace for each task //-------------------------------------------------------------------------- GB_WERK_PUSH (Work, 3*A_ntasks, int64_t) ; if (Work == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } Wfirst = Work ; Wlast = Work + A_ntasks ; Cp_kfirst = Work + A_ntasks * 2 ; //-------------------------------------------------------------------------- // allocate workspace for phase1 //-------------------------------------------------------------------------- // phase1 counts the number of live entries in each vector of A. The // result is computed in Cp, where Cp [k] is the number of live entries in // the kth vector of A. Zp [k] is the location of the A(i,k) entry, for // positional operators. if (op_is_positional) { // allocate Zp Zp = GB_MALLOC_WORK (anvec, int64_t, &Zp_size) ; if (Zp == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } } //-------------------------------------------------------------------------- // phase1: count the live entries in each column //-------------------------------------------------------------------------- // define the worker for the switch factory #define GB_SELECT_PHASE1 #define GB_sel1(opname,aname) GB (_sel_phase1_ ## opname ## aname) #define GB_SEL_WORKER(opname,aname,atype) \ { \ GB_sel1 (opname, aname) (Zp, Cp, Wfirst, Wlast, A, \ flipij, ithunk, (atype *) athunk, ythunk, op, \ A_ek_slicing, A_ntasks, A_nthreads) ; \ } \ break ; // launch the switch factory const GB_Type_code typecode = (A_iso) ? 
GB_ignore_code : acode ; #include "GB_select_factory.c" #undef GB_SELECT_PHASE1 #undef GB_SEL_WORKER //-------------------------------------------------------------------------- // cumulative sum of Cp and compute Cp_kfirst //-------------------------------------------------------------------------- int64_t C_nvec_nonempty ; GB_ek_slice_merge2 (&C_nvec_nonempty, Cp_kfirst, Cp, anvec, Wfirst, Wlast, A_ek_slicing, A_ntasks, A_nthreads, Context) ; //-------------------------------------------------------------------------- // allocate new space for the compacted Ci and Cx //-------------------------------------------------------------------------- cnz = Cp [anvec] ; cnz = GB_IMAX (cnz, 1) ; Ci = GB_MALLOC (cnz, int64_t, &Ci_size) ; // use calloc since C is sparse, not bitmap Cx = (GB_void *) GB_XALLOC (false, C_iso, cnz, asize, &Cx_size) ; // x:OK if (Ci == NULL || Cx == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // set the iso value of C //-------------------------------------------------------------------------- if (C_iso) { // The pattern of C is computed by the worker below, for the DIAG, // OFFDIAG, TRIL, TRIU, NONZOMBIE, and USER select operators. 
GB_iso_select (Cx, opcode, athunk, Ax, acode, asize) ; } //-------------------------------------------------------------------------- // phase2: select the entries //-------------------------------------------------------------------------- // define the worker for the switch factory #define GB_SELECT_PHASE2 #define GB_sel2(opname,aname) GB (_sel_phase2_ ## opname ## aname) #define GB_SEL_WORKER(opname,aname,atype) \ { \ GB_sel2 (opname, aname) (Ci, (atype *) Cx, Zp, Cp, Cp_kfirst, A, \ flipij, ithunk, (atype *) athunk, ythunk, op, \ A_ek_slicing, A_ntasks, A_nthreads) ; \ } \ break ; // launch the switch factory #include "GB_select_factory.c" //-------------------------------------------------------------------------- // create the result //-------------------------------------------------------------------------- if (in_place_A) { //---------------------------------------------------------------------- // transplant Cp, Ci, Cx back into A //---------------------------------------------------------------------- // TODO: this is not parallel: use GB_hyper_prune if (A->h != NULL && C_nvec_nonempty < anvec) { // prune empty vectors from Ah and Ap int64_t cnvec = 0 ; for (int64_t k = 0 ; k < anvec ; k++) { if (Cp [k] < Cp [k+1]) { Ah [cnvec] = Ah [k] ; Ap [cnvec] = Cp [k] ; cnvec++ ; } } Ap [cnvec] = Cp [anvec] ; A->nvec = cnvec ; ASSERT (A->nvec == C_nvec_nonempty) ; GB_FREE (&Cp, Cp_size) ; } else { // free the old A->p and transplant in Cp as the new A->p GB_FREE (&Ap, Ap_size) ; A->p = Cp ; Cp = NULL ; A->p_size = Cp_size ; A->plen = anvec ; } ASSERT (Cp == NULL) ; GB_FREE (&Ai, Ai_size) ; GB_FREE (&Ax, Ax_size) ; A->i = Ci ; Ci = NULL ; A->i_size = Ci_size ; A->x = Cx ; Cx = NULL ; A->x_size = Cx_size ; A->nvec_nonempty = C_nvec_nonempty ; A->jumbled = A_jumbled ; // A remains jumbled (in-place select) A->iso = C_iso ; // OK: burble already done above // the NONZOMBIE opcode may have removed all zombies, but A->nzombie // is still nonzero. 
It is set to zero in GB_wait. ASSERT_MATRIX_OK (A, "A output for GB_selector", GB_FLIP (GB0)) ; } else { //---------------------------------------------------------------------- // create C and transplant Cp, Ch, Ci, Cx into C //---------------------------------------------------------------------- int sparsity = (A_is_hyper) ? GxB_HYPERSPARSE : GxB_SPARSE ; ASSERT (C != NULL && C->static_header) ; info = GB_new (&C, true, // sparse or hyper (from A), static header A->type, avlen, avdim, GB_Ap_null, true, sparsity, A->hyper_switch, anvec, Context) ; ASSERT (info == GrB_SUCCESS) ; if (A->h != NULL) { //------------------------------------------------------------------ // A and C are hypersparse: copy non-empty vectors from Ah to Ch //------------------------------------------------------------------ Ch = GB_MALLOC (anvec, int64_t, &Ch_size) ; if (Ch == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } // TODO: do in parallel: use GB_hyper_prune int64_t cnvec = 0 ; for (int64_t k = 0 ; k < anvec ; k++) { if (Cp [k] < Cp [k+1]) { Ch [cnvec] = Ah [k] ; Cp [cnvec] = Cp [k] ; cnvec++ ; } } Cp [cnvec] = Cp [anvec] ; C->nvec = cnvec ; ASSERT (C->nvec == C_nvec_nonempty) ; } C->p = Cp ; Cp = NULL ; C->p_size = Cp_size ; C->h = Ch ; Ch = NULL ; C->h_size = Ch_size ; C->i = Ci ; Ci = NULL ; C->i_size = Ci_size ; C->x = Cx ; Cx = NULL ; C->x_size = Cx_size ; C->plen = anvec ; C->magic = GB_MAGIC ; C->nvec_nonempty = C_nvec_nonempty ; C->jumbled = A_jumbled ; // C is jumbled if A is jumbled C->iso = C_iso ; // OK: burble already done above ASSERT_MATRIX_OK (C, "C output for GB_selector", GB0) ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; }
main.c
// Lid-driven Cavity with explicit, central difference Artificial
// Compressibility method. See Ferziger Computational Methods for Fluid
// Dynamics, section 7.4.3. Travis Burrows.
//
// Reviewer notes (this revision):
//  * The convergence test used bitwise '&' where logical '&&' was intended.
//  * malloc_vectord() had a control path with no return value (undefined
//    behavior); it now returns NULL for invalid sizes, and main() checks
//    every allocation before use.
//  * Removed the unused local `Pe` in main().

#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

// Simulation Parameters
#define N 50           // Number of points in X and Y (including BCs)
#define Re 100.0       // Reynolds number
#define Beta 0.5       // compressibility constant
#define SAVETXT 1      // Controls whether to save a text file output
#define dt 5E-3        // dt
#define THRESH 1E-10   // Defines convergence of dp/dt
#define THREADS 4      // parallel threads
#define DEBUG 0        // Prints extra information

// Global Constants
#define H 1.0          // length of side of square domain
#define MAXITER 1E7    // Maximum iterations

// Macros
#define LU(i, j, Ni) (((Ni) * (j)) + (i))  // Look-up function, 2d, Ni x Nj
#define P2(x) ((x) * (x))                  // x^2

// Function Prototypes
double *malloc_vectord(int n1);
void free_vectord(double *a);
void zeros(double *array, int n);
void linspace(double *array, double start, double stop, int num);
void copy(double *source, double *destination, int n);
void datwrite(char filename[], char name1[], double *x, char name2[],
              double *y, char name3[], double *value3, char name4[],
              double *value4, char name5[], double *value5);
void printVector(char name[], double *vector, int n);
void printMatrix(char name[], double *vector, int n1, int n2);
void enforceBCs(double *u, double *v, double *p);
double diff(double *array1, double *array2, int n);
double mean(double *array, int n);

int main(void) {
  // Allocate Memory: current and previous-step fields, plus coordinates
  double *U = malloc_vectord(N * N);
  double *V = malloc_vectord(N * N);
  double *P = malloc_vectord(N * N);
  double *Up = malloc_vectord(N * N);
  double *Vp = malloc_vectord(N * N);
  double *Pp = malloc_vectord(N * N);
  double *x = malloc_vectord(N * N);
  double *y = malloc_vectord(N * N);
  if (!U || !V || !P || !Up || !Vp || !Pp || !x || !y) {
    printf("Out of memory\n");
    return 1;
  }

  double dp, pmean, start, stop, du;

  // Initialize variables: uniform spacing; first/last nodes are ghost
  // cells half a spacing outside the physical domain.
  double dx = H / (N - 2);
  linspace(x, -dx / 2.0, H + dx / 2.0, N);
  copy(x, y, N);
  char filename[80];
  snprintf(filename, sizeof(filename), "Solution_n=%dRe=%.0f.txt", N, Re);

  if (DEBUG == 1) {
    printf("dx = %.3f\n", dx);
    printVector("x", x, N);
  }

  // Initial Guess: zero fields, then impose boundary conditions
  zeros(U, N * N);
  zeros(V, N * N);
  zeros(P, N * N);
  enforceBCs(U, V, P);

  // Set number of parallel threads
  omp_set_num_threads(THREADS);

  // Iteration: explicit pseudo-time march until dp/dt converges
  start = omp_get_wtime();
  for (int k = 0; k < MAXITER; k++) {
    if (DEBUG == 1) {
      printMatrix("U", U, N, N);
      printMatrix("V", V, N, N);
      printMatrix("P", P, N, N);
    }

    // Store previous values
    copy(U, Up, N * N);
    copy(V, Vp, N * N);
    copy(P, Pp, N * N);

    // Start parallel block
#pragma omp parallel
    {
      // Get thread ID
      int tid = omp_get_thread_num();

      // Solve for U, V, P with round-robin parallel scheme over i.
      // Thread t owns rows i = 1+t, 1+t+THREADS, ..., so no two threads
      // write the same node; all reads are from the previous-step copies.
      for (int i = 1 + tid; i < N - 1; i += THREADS) {
        for (int j = 1; j < N - 1; j++) {
          // x-momentum: diffusion - pressure gradient - convection terms
          U[LU(i, j, N)] =
              Up[LU(i, j, N)] +
              (dt / (Re * P2(dx))) *
                  (Up[LU(i + 1, j, N)] + Up[LU(i - 1, j, N)] +
                   Up[LU(i, j + 1, N)] + Up[LU(i, j - 1, N)] -
                   4.0 * Up[LU(i, j, N)]) -
              (dt / (2.0 * dx)) *
                  (Pp[LU(i + 1, j, N)] - Pp[LU(i - 1, j, N)]) -
              (dt * Up[LU(i, j, N)] / dx) *
                  (Up[LU(i + 1, j, N)] - Up[LU(i - 1, j, N)]) -
              (dt * Vp[LU(i, j, N)] / (2.0 * dx)) *
                  (Up[LU(i, j + 1, N)] - Up[LU(i, j - 1, N)]) -
              (dt * Up[LU(i, j, N)] / (2.0 * dx)) *
                  (Vp[LU(i, j + 1, N)] - Vp[LU(i, j - 1, N)]);

          // y-momentum
          V[LU(i, j, N)] =
              Vp[LU(i, j, N)] +
              (dt / (Re * P2(dx))) *
                  (Vp[LU(i + 1, j, N)] + Vp[LU(i - 1, j, N)] +
                   Vp[LU(i, j + 1, N)] + Vp[LU(i, j - 1, N)] -
                   4.0 * Vp[LU(i, j, N)]) -
              (dt / (2.0 * dx)) *
                  (Pp[LU(i, j + 1, N)] - Pp[LU(i, j - 1, N)]) -
              (dt * Vp[LU(i, j, N)] / dx) *
                  (Vp[LU(i, j + 1, N)] - Vp[LU(i, j - 1, N)]) -
              (dt * Up[LU(i, j, N)] / (2.0 * dx)) *
                  (Vp[LU(i + 1, j, N)] - Vp[LU(i - 1, j, N)]) -
              (dt * Vp[LU(i, j, N)] / (2.0 * dx)) *
                  (Up[LU(i + 1, j, N)] - Up[LU(i - 1, j, N)]);

          // artificial-compressibility pressure update from divergence
          P[LU(i, j, N)] =
              Pp[LU(i, j, N)] -
              (dt / (2.0 * dx * Beta)) *
                  (Up[LU(i + 1, j, N)] - Up[LU(i - 1, j, N)] +
                   Vp[LU(i, j + 1, N)] - Vp[LU(i, j - 1, N)]);
        }
      }
    }

    // Ensure boundary conditions are not changed
    enforceBCs(U, V, P);

    // Calculate change in pressure and u
    dp = diff(P, Pp, N * N);
    du = diff(U, Up, N * N);
    pmean = mean(P, N * N);

    // Print iteration info every 500 iterations
    if (k % 500 == 0) {
      printf("\nIteration %d:\n", k);
      printf("dp:\t%.3e\n", dp);
      printf("du:\t%.3e\n", du);
      printf("pav:\t%.3e\n", pmean);
    }

    // Stop if dp/dt is below specified threshold.  Logical && (the original
    // bitwise & happened to work here but was a bug waiting to happen); the
    // k > 50 guard prevents the zero initial guess from triggering a
    // spurious "converged" exit on the first iterations.
    if (dp < THRESH && k > 50) break;
  }

  // Print execution time
  stop = omp_get_wtime();
  printf("\nExecution Time:\t%.3e s\n", stop - start);

  // Save a tecplot-formatted file
  if (SAVETXT == 1) {
    datwrite(filename, "x", x, "y", y, "u", U, "v", V, "p", P);
  }

  // If a grid point falls on x = 0.5, print profile
  if (N % 2 == 1) {
    printf("U Values along x=0.5:\n");
    printf("y\tU\n");
    int xint = (N - 1) / 2;
    for (int i = 0; i < N; i++) {
      printf("%.4f\t%.5f\n", y[i], U[LU(xint, i, N)]);
    }
  }

  // Free Memory
  free_vectord(U);
  free_vectord(V);
  free_vectord(P);
  free_vectord(Up);
  free_vectord(Vp);
  free_vectord(Pp);
  free_vectord(x);
  free_vectord(y);

  return 0;
}

// Copies the first n entries of source into destination.
void copy(double *source, double *destination, int n) {
  for (int i = 0; i < n; i++) {
    destination[i] = source[i];
  }
}

// Returns evenly spaced numbers over a specified interval, endpoints included.
void linspace(double *array, double start, double stop, int num) {
  for (int i = 0; i < num; i++) {
    array[i] = start + ((double)i) * (stop - start) / (double)(num - 1);
  }
}

// Allocates memory for a 1D array of n1 doubles.
// Returns NULL (after printing a diagnostic) if n1 is not positive or if
// the allocation fails; callers must check the result.
double *malloc_vectord(int n1) {
  if (n1 <= 0) {  // invalid size: report and return NULL (was: fell off
                  // the end of the function, which is undefined behavior)
    printf("Invalid input into malloc_vectord\n");
    return NULL;
  }
  double *mat = malloc((size_t)n1 * sizeof(double));
  if (mat == NULL) printf("Error allocating memory!");
  return mat;
}

// Frees memory for a 1D double array; a NULL input is reported.
void free_vectord(double *a) {
  if (a == NULL) {
    printf("Error: Null input in free_vectord");
    return;  // free(NULL) would be a no-op anyway, but skip it explicitly
  }
  free((void *)a);
}

// Assigns zeros to the first n entries of a vector.
void zeros(double *array, int n) {
  for (int i = 0; i < n; i++) {
    array[i] = 0.0;
  }
}

// Writes a Tecplot-formatted POINT-zone file of the interior (non-ghost)
// nodes.  Exits the program if the file cannot be opened.
void datwrite(char filename[], char name1[], double *x, char name2[],
              double *y, char name3[], double *value3, char name4[],
              double *value4, char name5[], double *value5) {
  FILE *f = fopen(filename, "w");
  if (f == NULL) {
    printf("Error opening file!\n");
    exit(1);
  }
  fprintf(f,
          "TITLE=\"%s\" VARIABLES=\"%s\", \"%s\", \"%s\", \"%s\", \"%s\" "
          "ZONE T=\"%s\" I=%d J=%d F=POINT\n",
          filename, name1, name2, name3, name4, name5, filename, N - 2, N - 2);
  for (int i = 1; i < N - 1; i++) {
    for (int j = 1; j < N - 1; j++) {
      fprintf(f, "%.10e, %.10e, %.10e, %.10e, %.10e\n", x[i], y[j],
              value3[LU(i, j, N)], value4[LU(i, j, N)], value5[LU(i, j, N)]);
    }
  }
  fclose(f);
}

// Prints a vector on one line.
void printVector(char name[], double *vector, int n) {
  printf("%s:\t", name);
  for (int i = 0; i < n; i++) {
    printf("%.3f ", vector[i]);
  }
  printf("\n");
}

// Prints an n1 x n2 matrix stored row-major via LU().
void printMatrix(char name[], double *vector, int n1, int n2) {
  printf("%s:\n", name);
  for (int j = 0; j < n2; j++) {
    for (int i = 0; i < n1; i++) {
      printf("%.4f\t", vector[LU(i, j, n1)]);
    }
    printf("\n");
  }
  printf("\n");
}

// Enforces lid-driven-cavity boundary conditions on the ghost cells:
// no-slip walls (values mirrored with opposite sign so the wall average is
// zero), except the top lid which moves with u = 1 (hence the 2.0 - ...),
// and zero-normal-gradient pressure everywhere.
void enforceBCs(double *u, double *v, double *p) {
  for (int i = 0; i < N; i++) {
    // Bottom Surface
    u[LU(i, 0, N)] = -u[LU(i, 1, N)];
    v[LU(i, 0, N)] = -v[LU(i, 1, N)];
    p[LU(i, 0, N)] = p[LU(i, 1, N)];

    // Top Surface (moving lid, u = 1)
    u[LU(i, N - 1, N)] = 2.0 - u[LU(i, N - 2, N)];
    v[LU(i, N - 1, N)] = -v[LU(i, N - 2, N)];
    p[LU(i, N - 1, N)] = p[LU(i, N - 2, N)];

    // Left Surface
    u[LU(0, i, N)] = -u[LU(1, i, N)];
    v[LU(0, i, N)] = -v[LU(1, i, N)];
    p[LU(0, i, N)] = p[LU(1, i, N)];

    // Right Surface
    u[LU(N - 1, i, N)] = -u[LU(N - 2, i, N)];
    v[LU(N - 1, i, N)] = -v[LU(N - 2, i, N)];
    p[LU(N - 1, i, N)] = p[LU(N - 2, i, N)];
  }
}

// Returns the L2 norm of the difference of two arrays of length n.
double diff(double *array1, double *array2, int n) {
  double difference = 0;
  for (int i = 0; i < n; i++) {
    difference += P2(array1[i] - array2[i]);
  }
  return sqrt(difference);
}

// Returns the mean of an array of length n.
double mean(double *array, int n) {
  double average = 0;
  for (int i = 0; i < n; i++) {
    average += array[i] / n;
  }
  return average;
}
GB_Matrix_extractElement.c
//------------------------------------------------------------------------------
// GB_Matrix_extractElement: x = A(row,col)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Extract the value of single scalar, x = A(row,col), typecasting from the
// type of A to the type of x, as needed.

// Returns GrB_SUCCESS if A(row,col) is present, and sets x to its value.
// Returns GrB_NO_VALUE if A(row,col) is not present, and x is unmodified.

// This template constructs GrB_Matrix_extractElement_[TYPE] for each of the
// 13 built-in types, and the _UDT method for all user-defined types.
// GB_EXTRACT_ELEMENT, GB_XTYPE, GB_XCODE, and (optionally) GB_UDT_EXTRACT
// are macros defined by the file that includes this template (see the
// #undef list at the bottom).

// FUTURE: tolerate zombies

GrB_Info GB_EXTRACT_ELEMENT     // extract a single entry, x = A(row,col)
(
    GB_XTYPE *x,                // scalar to extract, not modified if not found
    const GrB_Matrix A,         // matrix to extract a scalar from
    GrB_Index row,              // row index
    GrB_Index col               // column index
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_RETURN_IF_NULL_OR_FAULTY (A) ;
    GB_RETURN_IF_NULL (x) ;

    // TODO: do not wait unless jumbled.  First try to find the element.
    // If found (live or zombie), no need to wait.  If not found and pending
    // tuples exist, wait and then extractElement again.

    // delete any lingering zombies, assemble any pending tuples, and unjumble
    if (GB_ANY_PENDING_WORK (A))
    {
        GrB_Info info ;
        GB_WHERE1 (GB_WHERE_STRING) ;
        GB_BURBLE_START ("GrB_Matrix_extractElement") ;
        GB_OK (GB_wait (A, "A", Context)) ;
        GB_BURBLE_END ;
    }

    ASSERT (!GB_ANY_PENDING_WORK (A)) ;

    // look for index i in vector j: (i,j) = (row,col) if A is held by
    // column (CSC), or (col,row) if held by row (CSR)
    int64_t i, j, nrows, ncols ;
    if (A->is_csc)
    {
        i = row ;
        j = col ;
        nrows = A->vlen ;
        ncols = A->vdim ;
    }
    else
    {
        i = col ;
        j = row ;
        nrows = A->vdim ;
        ncols = A->vlen ;
    }

    // check row and column indices
    if (row >= nrows || col >= ncols)
    {
        return (GrB_INVALID_INDEX) ;
    }

    // GB_XCODE and A must be compatible
    GB_Type_code acode = A->type->code ;
    if (!GB_code_compatible (GB_XCODE, acode))
    {
        return (GrB_DOMAIN_MISMATCH) ;
    }

    if (GB_nnz (A) == 0)
    {
        // quick return: an empty matrix has no entry at all
        return (GrB_NO_VALUE) ;
    }

    //--------------------------------------------------------------------------
    // find the entry A(i,j)
    //--------------------------------------------------------------------------

    // on success, pleft is the position of A(i,j) in Ai/Ax (sparse case)
    // or in Ab/Ax (bitmap/full case)
    int64_t pleft ;
    bool found ;
    const int64_t *restrict Ap = A->p ;

    if (Ap != NULL)
    {

        // A is sparse or hypersparse
        const int64_t *restrict Ai = A->i ;

        // extract from vector j of a GrB_Matrix
        int64_t k ;
        if (A->h != NULL)
        {
            // A is hypersparse: look for j in hyperlist A->h [0 ... A->nvec-1]
            const int64_t *restrict Ah = A->h ;
            int64_t pleft = 0 ;             // NOTE: intentionally shadows the
                                            // outer pleft for this search only
            int64_t pright = A->nvec-1 ;
            GB_BINARY_SEARCH (j, Ah, pleft, pright, found) ;
            if (!found)
            {
                // vector j is empty
                return (GrB_NO_VALUE) ;
            }
            ASSERT (j == Ah [pleft]) ;
            k = pleft ;
        }
        else
        {
            // A is sparse: j = k is the kth vector
            k = j ;
        }

        // entries of vector k live in Ai/Ax [Ap [k] ... Ap [k+1]-1]
        pleft = Ap [k] ;
        int64_t pright = Ap [k+1] - 1 ;

        // binary search in kth vector for index i
        // Time taken for this step is at most O(log(nnz(A(:,j))).
        GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ;

    }
    else
    {

        // A is bitmap or full: the position is computed directly
        pleft = i + j * A->vlen ;
        const int8_t *restrict Ab = A->b ;
        if (Ab != NULL)
        {
            // A is bitmap: present only if the bitmap flag is set
            found = (Ab [pleft] == 1) ;
        }
        else
        {
            // A is full: every entry is present
            found = true ;
        }
    }

    //--------------------------------------------------------------------------
    // extract the element
    //--------------------------------------------------------------------------

    if (found)
    {
        // for built-in types only: direct copy when no typecast is needed
        // (this branch is compiled out for the user-defined-type variant)
        #if !defined ( GB_UDT_EXTRACT )
        if (GB_XCODE == acode)
        {
            // copy A [pleft] into x, no typecasting, for built-in types only.
            // iso matrices keep a single value at position 0.
            GB_XTYPE *restrict Ax = ((GB_XTYPE *) (A->x)) ;
            (*x) = Ax [A->iso ? 0:pleft] ;
        }
        else
        #endif
        {
            // typecast the value from A [pleft] into x
            size_t asize = A->type->size ;
            void *ax = ((GB_void *) A->x) + (A->iso ? 0 : (pleft*asize)) ;
            GB_cast_scalar (x, GB_XCODE, ax, acode, asize) ;
        }
        // TODO: do not flush if extracting to GrB_Scalar
        #pragma omp flush
        return (GrB_SUCCESS) ;
    }
    else
    {
        // Entry not found.
        return (GrB_NO_VALUE) ;
    }
}

#undef GB_UDT_EXTRACT
#undef GB_EXTRACT_ELEMENT
#undef GB_XTYPE
#undef GB_XCODE
zSchCompUdt-cuda.c
/*! @file * \brief This file contains the main loop of pzgstrf which involves * rank k update of the Schur complement. * Uses CUDA GPU. * * <pre> * -- Distributed SuperLU routine (version 4.0) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 1, 2014 * */ #define SCHEDULE_STRATEGY dynamic #define cublasCheckErrors(fn) \ do { \ cublasStatus_t __err = fn; \ if (__err != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "Fatal cublas error: %d (at %s:%d)\n", \ (int)(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while(0); if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */ ldu =0; full =1; int cum_nrow; int temp_nbrow; lptr = lptr0; luptr = luptr0; nbrow= lsub[1]; if (myrow==krow) nbrow = lsub[1]-lsub[3]; if (nbrow>0) { int ncol_max = SUPERLU_MIN(buffer_size/nbrow,bigu_size/ldt); int num_streams_used, /*number of streams that will be used*/ ncpu_blks; /*Number of CPU dgemm blks*/ int jjj, jjj_st,jjj_global; for (j = jj0; j < nub; ++j) { arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid ); ncols =0 ; //initialize at 0 jj = iukp; int temp_ldu=0; for (; jj < iukp+nsupc; ++jj) { segsize = klst - usub[jj]; if ( segsize ) { ++ncols; } temp_ldu = SUPERLU_MAX(temp_ldu, segsize); } full_u_cols[j] = ncols; blk_ldu[j] = temp_ldu; } /* end for j = jj0..nub */ jjj = jj0; /* initialization */ // #pragma omp barrier while ( jjj < nub ) { jjj_st=jjj; #ifdef _OPENMP #pragma omp single #endif { ldu = blk_ldu[jjj_st]; for (j = jjj_st; j < nub ; ++j) { /* prefix sum */ if (j != jjj_st) full_u_cols[j] += full_u_cols[j-1]; ldu = SUPERLU_MAX(ldu, blk_ldu[j]); /* break condition */ /* the number of columns that can be processed is limited by buffer size*/ if (full_u_cols[j]+((j+1==nub)?0:full_u_cols[j+1]) > ncol_max) { break; } } /* end for j=jjj_st to nub */ jjj_global = SUPERLU_MIN(nub, j+1); /* Maximum value of jjj will be nub */ // TAU_STATIC_TIMER_START("work_divison"); /* 
Divide CPU-GPU gemm here */ gemm_division_cpu_gpu( &num_streams_used, /*number of streams that will be used*/ stream_end_col, /*array holding last column blk for each partition*/ &ncpu_blks, /*Number of CPU gemm blks*/ /*input*/ nbrow, /*number of row in A matrix*/ ldu, /*number of k in dgemm*/ nstreams, full_u_cols + jjj_st, /*array containing prefix sum of work load*/ jjj_global-jjj_st /*Number of work load */ ); // TAU_STATIC_TIMER_STOP("work_divison"); } /* pragma omp single */ jjj = jjj_global; // printf("thread_id %d, jjj %d \n",thread_id,jjj ); if (jjj == jjj_st+1 && full_u_cols[jjj_st] > ncol_max) { printf("allocate more memory for buffer !!!!\n"); if(nbrow * full_u_cols[jjj_st] > buffer_size) printf("%d buffer_size %d\n",nbrow*full_u_cols[jjj_st],buffer_size ); } // #pragma omp barrier /* gathering circuit */ assert(jjj_st<nub); assert(jjj-1<nub); // TAU_STATIC_TIMER_START("GATHER_U"); #ifdef _OPENMP #pragma omp for schedule( SCHEDULE_STRATEGY ) #endif for (j = jjj_st; j < jjj; ++j) { if (j==jjj_st) tempu = bigU; else tempu = bigU + ldu*full_u_cols[j-1]; /* == processing each of the remaining columns == */ arrive_at_ublock(j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid); // tempu = tempU2d; for (jj = iukp; jj < iukp+nsupc; ++jj) { segsize = klst - usub[jj]; if ( segsize ) { lead_zero = ldu - segsize; for (i = 0; i < lead_zero; ++i) tempu[i] = zero; tempu += lead_zero; for (i = 0; i < segsize; ++i) tempu[i] = uval[rukp+i]; rukp += segsize; tempu += segsize; } } rukp -= usub[iukp - 1]; /* Return to start of U(k,j). 
*/ } /* end for j=jjj_st to jjj */ if ( num_streams_used > 0 ) { #ifdef PI_DEBUG printf("nbrow %d *ldu %d =%d < ldt %d * max_row_size %d =%d \n",nbrow,ldu,nbrow*ldu,ldt,max_row_size,ldt*max_row_size ); assert(nbrow*ldu<=ldt*max_row_size); #endif cudaMemcpy2DAsync(dA, nbrow*sizeof(doublecomplex), &lusup[luptr+(knsupc-ldu)*nsupr], nsupr*sizeof(doublecomplex), nbrow*sizeof(doublecomplex), ldu, cudaMemcpyHostToDevice, streams[0]); } for (int i = 0; i < num_streams_used; ++i) { int st = (i==0) ? ncpu_blks+jjj_st : jjj_st+stream_end_col[i-1]; int st_col = full_u_cols[st-1]; int num_col_stream = full_u_cols[jjj_st+stream_end_col[i]-1]-full_u_cols[st-1]; tempu = bigU; doublecomplex *tempv1 = bigV + full_u_cols[st-1]*nbrow; /* Following is for testing purpose */ #ifdef GPU_ACC int stream_id = i; int b_offset = ldu * st_col; int c_offset = st_col * nbrow; size_t B_stream_size = ldu * num_col_stream * sizeof(doublecomplex); size_t C_stream_size = nbrow * num_col_stream * sizeof(doublecomplex); assert(ldu*(st_col+num_col_stream) < bigu_size); assert(nbrow*(st_col+num_col_stream) < buffer_size); cudaMemcpyAsync(dB+b_offset, tempu+b_offset, B_stream_size, cudaMemcpyHostToDevice, streams[stream_id]); cublasCheckErrors( cublasSetStream(handle[stream_id], streams[stream_id]) ); cublasCheckErrors( cublasZgemm(handle[stream_id], CUBLAS_OP_N, CUBLAS_OP_N, nbrow, num_col_stream, ldu, (const cuDoubleComplex*) &alpha, (const cuDoubleComplex*) dA, nbrow, (const cuDoubleComplex*) &dB[b_offset], ldu, (const cuDoubleComplex*) &beta, (cuDoubleComplex*)&dC[c_offset], nbrow) ); checkCuda( cudaMemcpyAsync(tempv1, dC+c_offset, C_stream_size, cudaMemcpyDeviceToHost, streams[stream_id]) ); #else if ( num_col_stream > 0 ) { my_zgemm_("N", "N", &nbrow, &num_col_stream, &ldu, &alpha, &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, tempu+ldu*st_col, &ldu, &beta, tempv1, &nbrow, 1, 1); } #endif } /* end for i = 1 to num_streams used */ int num_col = full_u_cols[jjj_st+ncpu_blks-1]; int st_col = 0; /*special 
case for cpu */ tempv = bigV + nbrow * st_col; tempu = bigU; double tstart = SuperLU_timer_(); #if defined (USE_VENDOR_BLAS) zgemm_("N", "N", &nbrow, &num_col, &ldu, &alpha, &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, tempu+ldu*st_col, &ldu, &beta, tempv, &nbrow, 1, 1); #else zgemm_("N", "N", &nbrow, &num_col, &ldu, &alpha, &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, tempu+ldu*st_col, &ldu, &beta, tempv, &nbrow); #endif gemm_timer += SuperLU_timer_() -tstart; stat->ops[FACT] += 2 * nbrow * ldu * full_u_cols[jjj-1]; // printf("after zgemm \n"); /* Now scattering blocks handled by cpu */ int temp_ncol; /* scatter first blocks which cpu has computated*/ tstart = SuperLU_timer_(); #ifdef _OPENMP #pragma omp parallel \ private(j,iukp,rukp, tempu, tempv, cum_nrow, jb, nsupc,ljb, \ segsize,lead_zero, \ ib, temp_nbrow,ilst,lib,index, \ ijb,fnz,ucol,rel,ldv,lptrj,luptrj, \ nzval, lb , jj, i) \ firstprivate(luptr,lptr) default (shared) #endif { int thread_id = omp_get_thread_num(); int* indirect_thread = indirect + ldt*thread_id; int* indirect2_thread = indirect2 + ldt*thread_id; doublecomplex* tempv1; if (ncpu_blks< omp_get_num_threads()) { // TAU_STATIC_TIMER_START("SPECIAL_CPU_SCATTER"); for (j = jjj_st; j < jjj_st+ncpu_blks; ++j) { /* code */ #ifdef PI_DEBUG printf("scattering %d block column\n",j); #endif /* == processing each of the remaining columns == */ if(j==jjj_st) tempv1 = bigV; else tempv1 = bigV + full_u_cols[j-1]*nbrow; arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid ); cum_nrow =0 ; /* do update with the kth column of L and (k,j)th block of U */ lptr = lptr0; luptr = luptr0; #ifdef _OPENMP #pragma omp for schedule( SCHEDULE_STRATEGY ) nowait #endif for (lb = 0; lb < nlb; lb++ ) { int cum_nrow = 0; int temp_nbrow; lptr = lptr0; luptr = luptr0; for (int i = 0; i < lb; ++i) { ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ lptr += LB_DESCRIPTOR; /* Skip descriptor. 
*/ lptr += temp_nbrow; luptr += temp_nbrow; cum_nrow +=temp_nbrow; } ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ assert(temp_nbrow<=nbrow); lptr += LB_DESCRIPTOR; /* Skip descriptor. */ /* Now gather the result into the destination block. */ if ( ib < jb ) { /* A(i,j) is in U. */ #ifdef PI_DEBUG printf("cpu scatter \n"); printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_u ( ib,jb, nsupc,iukp,xsup, klst,nbrow, lptr,temp_nbrow,lsub, usub,tempv, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { /* A(i,j) is in L. */ #ifdef PI_DEBUG printf("cpu scatter \n"); printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_l ( ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr, temp_nbrow,usub,lsub,tempv, indirect_thread,indirect2_thread, Lrowind_bc_ptr,Lnzval_bc_ptr,grid ); } /* if ib < jb ... */ lptr += temp_nbrow; luptr += temp_nbrow; cum_nrow += temp_nbrow; } /* for lb ... */ luptr=luptr0; } /* for j = jjj_st ... */ // TAU_STATIC_TIMER_STOP("SPECIAL_CPU_SCATTER"); } else { #ifdef _OPENMP #pragma omp for schedule(SCHEDULE_STRATEGY) nowait #endif for (j = jjj_st; j < jjj_st+ncpu_blks; ++j) { /* code */ #ifdef PI_DEBUG printf("scattering %d block column\n",j); #endif /* == processing each of the remaining columns == */ if(j==jjj_st) tempv1 = bigV; else tempv1 = bigV + full_u_cols[j-1]*nbrow; arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid ); cum_nrow =0 ; /* do update with the kth column of L and (k,j)th block of U */ lptr = lptr0; luptr = luptr0; for (lb = 0; lb < nlb; lb++ ) { ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ assert(temp_nbrow<=nbrow); lptr += LB_DESCRIPTOR; /* Skip descriptor. 
*/ #ifdef DGEMM_STAT if(j==jjj_st) { temp_ncol = full_u_cols[j]; } else { temp_ncol = full_u_cols[j]- full_u_cols[j-1]; } printf("%d %d %d \n",temp_nbrow, temp_ncol,ldu); #endif /* Now gather the result into the destination block. */ if ( ib < jb ) { /* A(i,j) is in U. */ #ifdef PI_DEBUG printf("cpu scatter \n"); printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_u ( ib,jb, nsupc,iukp,xsup, klst,nbrow, lptr,temp_nbrow,lsub, usub,tempv, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { /* A(i,j) is in L. */ #ifdef PI_DEBUG printf("cpu scatter \n"); printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_l ( ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr, temp_nbrow,usub,lsub,tempv, indirect_thread,indirect2_thread, Lrowind_bc_ptr,Lnzval_bc_ptr,grid ); } /* if ib < jb ... */ lptr += temp_nbrow; luptr += temp_nbrow; cum_nrow += temp_nbrow; } /* for lb ... */ luptr=luptr0; } /* for j = jjj_st ... */ } /* else if (ncpu_blks >= omp_get_num_threads()) */ } /* parallel region */ scatter_timer += SuperLU_timer_() - tstart; #ifdef _OPENMP #pragma omp parallel \ private(j,iukp,rukp, tempu, tempv, cum_nrow, jb, nsupc,ljb, \ segsize,lead_zero, \ ib, temp_nbrow,ilst,lib,index, \ ijb,fnz,ucol,rel,ldv,lptrj,luptrj, \ nzval, lb , jj, i) \ firstprivate(luptr,lptr) default (shared) #endif { int thread_id = omp_get_thread_num(); int* indirect_thread = indirect + ldt*thread_id; int* indirect2_thread = indirect2 + ldt*thread_id; doublecomplex* tempv1; for(i = 0; i < num_streams_used; i++) { /* i is private variable */ checkCuda(cudaStreamSynchronize (streams[i])); int jjj_st1 = (i==0) ? 
jjj_st + ncpu_blks : jjj_st + stream_end_col[i-1]; int jjj_end = jjj_st + stream_end_col[i]; assert(jjj_end-1<nub); assert(jjj_st1>jjj_st) ; /* now scatter it */ #pragma omp for schedule( SCHEDULE_STRATEGY ) nowait for (j = jjj_st1; j < jjj_end; ++j) { /* code */ #ifdef PI_DEBUG printf("scattering %d block column\n",j); #endif /* == processing each of the remaining columns == */ if(j==jjj_st) tempv1 = bigV; else tempv1 = bigV + full_u_cols[j-1]*nbrow; arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc, iukp0,rukp0,usub,perm_u,xsup,grid ); cum_nrow =0 ; /* do update with the kth column of L and (k,j)th block of U */ lptr = lptr0; luptr = luptr0; for (lb = 0; lb < nlb; lb++) { ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ assert(temp_nbrow<=nbrow); lptr += LB_DESCRIPTOR; /* Skip descriptor. */ #ifdef DGEMM_STAT if(j==jjj_st) { temp_ncol = full_u_cols[j]; } else { temp_ncol = full_u_cols[j]- full_u_cols[j-1]; } printf("%d %d %d \n",temp_nbrow, temp_ncol,ldu); #endif /* Now gather the result into the destination block. */ if ( ib < jb ) { /* A(i,j) is in U. */ #ifdef PI_DEBUG printf("gpu scatter \n"); printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_u ( ib,jb, nsupc,iukp,xsup, klst,nbrow, lptr,temp_nbrow,lsub, usub,tempv, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { /* A(i,j) is in L. */ #ifdef PI_DEBUG printf("gpu scatter \n"); printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb); #endif tempv = tempv1+cum_nrow; zscatter_l ( ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr, temp_nbrow,usub,lsub,tempv, indirect_thread,indirect2_thread, Lrowind_bc_ptr,Lnzval_bc_ptr,grid ); } /* if ib < jb ... */ lptr += temp_nbrow; luptr += temp_nbrow; cum_nrow += temp_nbrow; } /* for lb ... */ luptr=luptr0; } /* for j = jjj_st ... 
*/ } /* end for i = 0 to nstreams */ // TAU_STATIC_TIMER_STOP("GPU_SCATTER"); // TAU_STATIC_TIMER_STOP("INSIDE_OMP"); } /* end pragma omp parallel */ // TAU_STATIC_TIMER_STOP("OUTSIDE_OMP"); } /* end while(jjj<nub) */ } /* if nbrow>0 */ } /* if msg1 and msg 2 */
omp_apps.c
#include "omp_apps.h"
#include <stdio.h>

/* -------------------------------Dot Product------------------------------*/

/* Allocate and return an array of n doubles filled with pseudo-random values
 * in [0, 1) from drand48().  The caller owns the buffer and must free() it.
 * NOTE(review): the malloc result is not checked; a failed allocation will
 * crash in the fill loop — TODO confirm whether callers can tolerate NULL. */
double* gen_array(int n) {
  double* array = (double*)malloc(n * sizeof(double));
  for (int i = 0; i < n; i++) array[i] = drand48();
  return array;
}

/* Deliberately slow baseline: every partial sum goes through a critical
 * section, serializing all threads.  Kept as-is for benchmark comparison. */
double dotp_naive(double* x, double* y, int arr_size) {
  double global_sum = 0.0;
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < arr_size; i++)
#pragma omp critical
      global_sum += x[i] * y[i];
  }
  return global_sum;
}

// EDIT THIS FUNCTION PART 1
/* Manually unrolled (by 4) dot product.
 *
 * Fixes vs. the previous version:
 *  - "#pragma omp for" was an orphaned worksharing construct (no enclosing
 *    parallel region), so the loop never actually ran in parallel;
 *  - with private(i), the value of i after the worksharing loop is
 *    indeterminate, so the scalar cleanup loop read an uninitialized index;
 *  - the critical section serialized every partial sum.
 * Now a genuine parallel-for with a reduction is used, and the cleanup
 * start index is computed deterministically from arr_size. */
double dotp_manual_optimized(double* x, double* y, int arr_size) {
  double global_sum = 0.0;
  int limit = arr_size - (arr_size % 4); /* largest multiple of 4 <= arr_size */
#pragma omp parallel for reduction(+ : global_sum)
  for (int i = 0; i < limit; i += 4) {
    global_sum += x[i] * y[i];
    global_sum += x[i + 1] * y[i + 1];
    global_sum += x[i + 2] * y[i + 2];
    global_sum += x[i + 3] * y[i + 3];
  }
  /* Scalar cleanup for the final (arr_size % 4) elements. */
  for (int i = limit; i < arr_size; i += 1) {
    global_sum += x[i] * y[i];
  }
  return global_sum;
}

// EDIT THIS FUNCTION PART 2
/* Dot product using OpenMP's built-in reduction clause: each thread keeps a
 * private accumulator which is summed once at the end of the region. */
double dotp_reduction_optimized(double* x, double* y, int arr_size) {
  double global_sum = 0.0;
#pragma omp parallel
  {
#pragma omp for reduction(+ \
                          : global_sum)
    for (int i = 0; i < arr_size; i++)
      global_sum += x[i] * y[i];
  }
  return global_sum;
}

/* Benchmark the three dot-product implementations over 1..max threads and
 * return a heap-allocated, NUL-terminated report string (caller frees).
 * Each timing runs the product REPEAT times; results are verified against a
 * serial reference and the report stops early on a mismatch.
 * Fix: the input vectors x and y were leaked on every return path. */
char* compute_dotp(int arr_size) {
  // Generate input vectors
  char *report_buf = (char*)malloc(BUF_SIZE), *pos = report_buf;
  double start_time, run_time;
  double *x = gen_array(arr_size), *y = gen_array(arr_size);
  double serial_result = 0.0, result = 0.0;

  // calculate result serially
  for (int i = 0; i < arr_size; i++) {
    serial_result += x[i] * y[i];
  }

  int num_threads = omp_get_max_threads();
  for (int i = 1; i <= num_threads; i++) {
    omp_set_num_threads(i);
    start_time = omp_get_wtime();
    for (int j = 0; j < REPEAT; j++) result = dotp_manual_optimized(x, y, arr_size);
    run_time = omp_get_wtime() - start_time;
    pos += sprintf(pos, "Manual Optimized: %d thread(s) took %f seconds\n", i, run_time);

    // verify result is correct (within some threshold)
    if (fabs(serial_result - result) > 0.001) {
      pos += sprintf(pos, "Incorrect result!\n");
      *pos = '\0';
      free(x); /* fix: inputs were leaked on this early return */
      free(y);
      return report_buf;
    }
  }

  for (int i = 1; i <= num_threads; i++) {
    omp_set_num_threads(i);
    start_time = omp_get_wtime();
    for (int j = 0; j < REPEAT; j++) {
      result = dotp_reduction_optimized(x, y, arr_size);
    }
    run_time = omp_get_wtime() - start_time;
    pos += sprintf(pos, "Reduction Optimized: %d thread(s) took %f seconds\n", i, run_time);

    // verify result is correct (within some threshold)
    if (fabs(serial_result - result) > 0.001) {
      pos += sprintf(pos, "Incorrect result!\n");
      *pos = '\0';
      free(x); /* fix: inputs were leaked on this early return */
      free(y);
      return report_buf;
    }
  }

  // Only run this once because it's too slow..
  omp_set_num_threads(1);
  start_time = omp_get_wtime();
  for (int j = 0; j < REPEAT; j++) result = dotp_naive(x, y, arr_size);
  run_time = omp_get_wtime() - start_time;
  pos += sprintf(pos, "Naive: %d thread(s) took %f seconds\n", 1, run_time);

  *pos = '\0';
  free(x); /* fix: x and y were never freed */
  free(y);
  return report_buf;
}

/* ---------------------Image Processing: Sobel Edge Detector----------------------*/

/* 3x3 horizontal Sobel kernel (only the x-gradient is computed here). */
int sobel[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } };

/* Convolve the Sobel kernel with the 3x3 neighborhood of src at (row, col),
 * summing the B+G+R channels of each pixel, clamp to [0,255], and store the
 * result as a gray pixel in dst.  Callers must keep (row, col) at least one
 * pixel away from every image border. */
void sobel_filter(bmp_pixel** src, bmp_pixel** dst, int row, int col) {
  int res = 0;
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      bmp_pixel pxl = src[row - 1 + i][col - 1 + j];
      res += ((int)pxl.blue + (int)pxl.green + (int)pxl.red) * sobel[i][j];
    }
  }
  res *= 2;  // scale a little bit so the result image is brighter.
  res = res < 0 ? 0 : (res > 255 ? 255 : res);
  bmp_pixel_init(&dst[row][col], res, res, res);
}

/* Read the BMP named by filename, run the Sobel filter over its interior,
 * write the result to "<base>_sobel.bmp", and return that output name
 * (heap-allocated, caller frees).  Returns NULL if the input can't be read.
 * Fixes: the output-name buffer was a fixed 32 bytes and overflowed for long
 * filenames; the loop bounds mixed signed i with unsigned hgt/wid, which
 * wrapped around for degenerate (0-sized) images. */
char* image_proc(const char* filename) {
  bmp_img img, img_copy;
  if (bmp_img_read(&img, filename) != 0) return NULL;

  /* Build "<base>_sobel.bmp", sized from the input name (drop ".bmp"). */
  size_t base_len = strlen(filename) - 4;
  char* res = (char*)calloc(base_len + sizeof("_sobel.bmp"), sizeof(char));
  strncat(res, filename, base_len);
  strcat(res, "_sobel.bmp");

  bmp_img_read(&img_copy, filename);
  unsigned int wid = img.img_header.biWidth;
  unsigned int hgt = img.img_header.biHeight;
  bmp_img_init_df(&img_copy, wid, hgt);

  // To parallelize this for loops, check out scheduling policy: http://jakascorner.com/blog/2016/06/omp-for-scheduling.html
  // and omp collapse directive https://software.intel.com/en-us/articles/openmp-loop-collapse-directive
  /* Signed bounds avoid unsigned wraparound when wid or hgt is 0 or 1. */
  for (int i = 1; i < (int)hgt - 1; i++) {
    for (int j = 1; j < (int)wid - 1; j++) {
      sobel_filter(img.img_pixels, img_copy.img_pixels, i, j);
    }
  }
  bmp_img_write(&img_copy, res);
  bmp_img_free(&img_copy);
  bmp_img_free(&img);
  return res;
}
core_zunmqr.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

/***************************************************************************//**
 *
 * @ingroup core_unmqr
 *
 *  Overwrites the general m-by-n tile C with
 *
 *                                 side = PlasmaLeft      side = PlasmaRight
 *    trans = PlasmaNoTrans              Q * C                  C * Q
 *    trans = Plasma_ConjTrans         Q^H * C                  C * Q^H
 *
 *  where Q is a unitary matrix defined as the product of k
 *  elementary reflectors
 *  \f[
 *      Q = H(1) H(2) ... H(k)
 *  \f]
 *  as returned by core_zgeqrt. Q is of order m if side = PlasmaLeft
 *  and of order n if side = PlasmaRight.
 *
 *******************************************************************************
 *
 * @param[in] side
 *         - PlasmaLeft  : apply Q or Q^H from the Left;
 *         - PlasmaRight : apply Q or Q^H from the Right.
 *
 * @param[in] trans
 *         - PlasmaNoTrans    : No transpose, apply Q;
 *         - Plasma_ConjTrans : Transpose, apply Q^H.
 *
 * @param[in] m
 *         The number of rows of the tile C. m >= 0.
 *
 * @param[in] n
 *         The number of columns of the tile C. n >= 0.
 *
 * @param[in] k
 *         The number of elementary reflectors whose product defines
 *         the matrix Q.
 *         If side = PlasmaLeft,  m >= k >= 0;
 *         if side = PlasmaRight, n >= k >= 0.
 *
 * @param[in] ib
 *         The inner-blocking size. ib >= 0.
 *
 * @param[in] A
 *         Dimension: (lda,k)
 *         The i-th column must contain the vector which defines the
 *         elementary reflector H(i), for i = 1,2,...,k,
 *         as returned by core_zgeqrt in the first k columns of its
 *         array argument A.
 *
 * @param[in] lda
 *         The leading dimension of the array A.
 *         If side = PlasmaLeft,  lda >= max(1,m);
 *         if side = PlasmaRight, lda >= max(1,n).
 *
 * @param[in] T
 *         The ib-by-k triangular factor T of the block reflector.
 *         T is upper triangular by block (economic storage);
 *         The rest of the array is not referenced.
 *
 * @param[in] ldt
 *         The leading dimension of the array T. ldt >= ib.
 *
 * @param[in,out] C
 *         On entry, the m-by-n tile C.
 *         On exit, C is overwritten by Q*C or Q^T*C or C*Q^T or C*Q.
 *
 * @param[in] ldc
 *         The leading dimension of the array C. ldc >= max(1,m).
 *
 * @param work
 *         Auxiliary workspace array of length
 *         ldwork-by-n  if side == PlasmaLeft
 *         ldwork-by-ib if side == PlasmaRight
 *
 * @param[in] ldwork
 *         The leading dimension of the array work.
 *             ldwork >= max(1,ib) if side == PlasmaLeft
 *             ldwork >= max(1,m)  if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
int core_zunmqr(plasma_enum_t side, plasma_enum_t trans,
                int m, int n, int k, int ib,
                const plasma_complex64_t *A,    int lda,
                const plasma_complex64_t *T,    int ldt,
                      plasma_complex64_t *C,    int ldc,
                      plasma_complex64_t *work, int ldwork)
{
    // Check input arguments.
    // (Error codes follow the LAPACK convention: -i means argument i is bad.)
    if (side != PlasmaLeft && side != PlasmaRight) {
        coreblas_error("illegal value of side");
        return -1;
    }

    int nq; // order of Q
    int nw; // dimension of work
    if (side == PlasmaLeft) {
        nq = m;
        nw = n;
    }
    else {
        nq = n;
        nw = m;
    }

    if (trans != PlasmaNoTrans && trans != Plasma_ConjTrans) {
        coreblas_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        coreblas_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        coreblas_error("illegal value of n");
        return -4;
    }
    if (k < 0 || k > nq) {
        coreblas_error("illegal value of k");
        return -5;
    }
    if (ib < 0) {
        coreblas_error("illegal value of ib");
        return -6;
    }
    if (A == NULL) {
        coreblas_error("NULL A");
        return -7;
    }
    if (lda < imax(1, nq) && nq > 0) {
        coreblas_error("illegal value of lda");
        return -8;
    }
    if (T == NULL) {
        coreblas_error("NULL T");
        return -9;
    }
    if (ldt < imax(1, ib)) {
        coreblas_error("illegal value of ldt");
        return -10;
    }
    if (C == NULL) {
        coreblas_error("NULL C");
        return -11;
    }
    if (ldc < imax(1, m) && m > 0) {
        coreblas_error("illegal value of ldc");
        return -12;
    }
    if (work == NULL) {
        coreblas_error("NULL work");
        return -13;
    }
    if (ldwork < imax(1, nw) && nw > 0) {
        coreblas_error("illegal value of ldwork");
        return -14;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Choose the sweep direction over the ib-wide blocks of reflectors:
    // either first-to-last (i1 = 0, step +ib) or last-to-first
    // (i1 = last block start, step -ib), depending on side/trans.
    int i1, i3;
    if ((side == PlasmaLeft  && trans != PlasmaNoTrans) ||
        (side == PlasmaRight && trans == PlasmaNoTrans)) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    // Apply the block reflectors one ib-wide panel at a time.
    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i);  // width of this panel (last may be narrower)
        int ic = 0;              // row offset of the affected part of C
        int jc = 0;              // column offset of the affected part of C
        int ni = n;
        int mi = m;

        if (side == PlasmaLeft) {
            // H or H^H is applied to C(i:m,1:n).
            mi = m - i;
            ic = i;
        }
        else {
            // H or H^H is applied to C(1:m,i:n).
            ni = n - i;
            jc = i;
        }

        // Apply H or H^H.
        LAPACKE_zlarfb_work(LAPACK_COL_MAJOR,
                            lapack_const(side), lapack_const(trans),
                            lapack_const(PlasmaForward),
                            lapack_const(PlasmaColumnwise),
                            mi, ni, kb,
                            &A[lda*i+i], lda,
                            &T[ldt*i],   ldt,
                            &C[ldc*jc+ic], ldc,
                            work, ldwork);
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper around core_zunmqr: registers A and T as task inputs
// and C as input/output so the runtime serializes dependent tile updates.
void core_omp_zunmqr(plasma_enum_t side, plasma_enum_t trans,
                     int m, int n, int k, int ib,
                     const plasma_complex64_t *A, int lda,
                     const plasma_complex64_t *T, int ldt,
                           plasma_complex64_t *C, int ldc,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(in:T[0:ib*k]) \
                     depend(inout:C[0:ldc*n])
    {
        // Skip the kernel entirely if an earlier task in this sequence failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            // Each OpenMP thread owns a preallocated slot in work.spaces.
            int tid = omp_get_thread_num();
            plasma_complex64_t *W = (plasma_complex64_t*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? n : m; // TODO: double check

            // Call the kernel.
            int info = core_zunmqr(side, trans,
                                   m, n, k, ib,
                                   A, lda,
                                   T, ldt,
                                   C, ldc,
                                   W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_zunmqr() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
hermv_c_dia_n_lo_trans.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>

/* y := beta*y + alpha*op(A)*x for a Hermitian matrix A stored in DIA
 * (diagonal) format with the lower triangle stored (only distance <= 0
 * diagonals are processed; the upper triangle is reconstructed by
 * conjugation).  A must be square.
 *
 * Parallelization: diagonals are distributed over OpenMP threads; each
 * thread accumulates into its own length-m scratch vector tmp[threadId],
 * and the per-thread vectors are reduced into y at the end, so no two
 * threads ever write the same location.
 *
 * NOTE(review): tmp is typed ALPHA_Number while the data is ALPHA_Complex —
 * presumably these are aliases of the same struct in this precision; verify
 * against the ONAME macro expansion.
 * NOTE(review): malloc results are unchecked; an allocation failure crashes
 * in memset — TODO confirm whether an out-of-memory status should be
 * returned instead.
 * NOTE(review): the main diagonal uses alpha_mul_3c rather than alpha_mul;
 * presumably that macro applies the conjugation required by the transposed
 * Hermitian op — confirm against the macro's definition. */
alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Complex *x, const ALPHA_Complex beta, ALPHA_Complex *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* Hermitian matrices must be square. */
    if(m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT thread_num = alpha_get_thread_num();
    /* One length-m accumulator per thread; zero-initialized. */
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(int i = 0; i < thread_num; ++i)
    {
        tmp[i] = malloc(sizeof(ALPHA_Number) * m);
        memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
    }

    const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < diags; ++i)
    {
        /* Each thread writes only tmp[threadId], so these updates race-free. */
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT dis = A->distance[i];
        if(dis == 0)
        {
            /* Main diagonal: contributes once per row. */
            const ALPHA_INT start = i * A->lval;
            for(ALPHA_INT j = 0; j < m; ++j)
            {
                ALPHA_Number v;
                alpha_mul_3c(v, alpha, A->values[start + j]);
                alpha_madde(tmp[threadId][j], v, x[j]);
            }
        }
        else if(dis < 0)
        {
            /* Sub-diagonal (lower triangle): each stored value a(r,c)
             * contributes to two output rows — once as stored and once
             * conjugated for the mirrored upper-triangle entry. */
            const ALPHA_INT row_start = -dis;
            const ALPHA_INT col_start = 0;
            const ALPHA_INT nnz = m + dis;   /* entries on this diagonal */
            const ALPHA_INT start = i * A->lval;
            for(ALPHA_INT j = 0; j < nnz; ++j)
            {
                ALPHA_Complex v,v_c;
                ALPHA_Complex val_orig = A->values[start + row_start + j];
                /* Conjugate for the mirrored (upper-triangle) contribution. */
                ALPHA_Complex val_conj = {val_orig.real,-val_orig.imag};
                alpha_mul(v, alpha, val_orig);
                alpha_mul(v_c, alpha, val_conj);
                alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
                alpha_madde(tmp[threadId][row_start + j], v_c, x[col_start + j]);
            }
        }
        /* dis > 0 (upper triangle) is ignored: only the lower half is stored. */
    }

    /* Reduction: y[i] = beta*y[i] + sum over threads of tmp[t][i]. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
        for(ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }

    /* Release the per-thread accumulators. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
GB_extract_vector_list.c
//------------------------------------------------------------------------------ // GB_extract_vector_list: extract vector indices for all entries in a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Constructs a list of vector indices for each entry in a matrix. Creates // the output J for GB_extractTuples, and I for GB_transpose when the qsort // method is used. #include "GB_ek_slice.h" #define GB_FREE_WORK \ GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice) ; bool GB_extract_vector_list // true if successful, false if out of memory ( // output: int64_t *GB_RESTRICT J, // size nnz(A) or more // input: const GrB_Matrix A, int nthreads ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (J != NULL) ; ASSERT (A != NULL) ; ASSERT (nthreads >= 1) ; //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Ap = A->p ; const int64_t *GB_RESTRICT Ah = A->h ; //-------------------------------------------------------------------------- // determine the # of tasks to use //-------------------------------------------------------------------------- int64_t anz = GB_NNZ (A) ; int ntasks = (nthreads == 1) ? 
1 : (2 * nthreads) ; ntasks = GB_IMIN (ntasks, anz) ; ntasks = GB_IMAX (ntasks, 1) ; //-------------------------------------------------------------------------- // slice the entries for each task //-------------------------------------------------------------------------- // Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1 and // vectors kfirst_slice [tid] to klast_slice [tid]. The first and last // vectors may be shared with prior slices and subsequent slices. int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ; if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A, ntasks)) { // out of memory return (false) ; } //-------------------------------------------------------------------------- // extract the vector index for each entry //-------------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // if kfirst > klast then task tid does no work at all int64_t kfirst = kfirst_slice [tid] ; int64_t klast = klast_slice [tid] ; for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // find the part of A(:,k) to be operated on by this task //------------------------------------------------------------------ int64_t j = (Ah == NULL) ? k : Ah [k] ; int64_t pA_start, pA_end ; GB_get_pA_and_pC (&pA_start, &pA_end, NULL, tid, k, kfirst, klast, pstart_slice, NULL, NULL, Ap) ; //------------------------------------------------------------------ // extract vector indices of A(:,j) //------------------------------------------------------------------ for (int64_t p = pA_start ; p < pA_end ; p++) { J [p] = j ; } } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORK ; return (true) ; }
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. 
/// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>; using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. 
This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the the statement are expanded from different /// appearances of the macro. AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. /// /// Examples matches the declaration node with \c foo and \c bar, but not /// \c number. 
/// (matcher = declStmt(has(decompositionDecl()))) /// /// \code /// int number = 42; /// auto [foo, bar] = std::make_pair{42, 42}; /// \endcode extern const internal::VariadicAllOfMatcher<DecompositionDecl> decompositionDecl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. 
/// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. 
/// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template arguments (with location info). /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgumentLoc() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches template template parameter declarations. 
/// /// Given /// \code /// template <template <typename> class Z, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'Z', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTemplateParmDecl> templateTemplateParmDecl; /// Matches public C++ declarations and C++ base specifers that specify public /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; // fieldDecl(isPublic()) matches 'a' /// protected: int b; /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived1 : public Base {}; // matches 'Base' /// struct Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPublic, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_public; } /// Matches protected C++ declarations and C++ base specifers that specify /// protected inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; // fieldDecl(isProtected()) matches 'b' /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived : protected Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isProtected, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_protected; } /// Matches private C++ declarations and C++ base specifers that specify private /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields. 
/// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. 
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder) != List.end(); } /// Causes all nested matchers to be matched with the specified traversal kind. /// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreUnlessSpelledInSource, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". 
template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename P1> class MatcherT, typename P1, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1< MatcherT, P1, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>( TK, InnerMatcher); } template <template <typename T, typename P1, typename P2> class MatcherT, typename P1, typename P2, typename ReturnTypesF> internal::TraversalWrapper< 
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2< MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>( TK, InnerMatcher); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. 
/// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. 
/// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. /// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. 
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. 
/// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that refers to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. 
/// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. 
/// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. 
/// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. 
/// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. 
/// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. 
/// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. /// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. 
/// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. 
/// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. 
/// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. /// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. 
/// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. 
/// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. 
/// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. 
/// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). 
/// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. 
/// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. 
extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches any node regardless of the submatcher. /// /// However, \c optionally will retain any bindings generated by the submatcher. /// Useful when additional information which may or may not present about a main /// matching node is desired. /// /// For example, in: /// \code /// class Foo { /// int bar; /// } /// \endcode /// The matcher: /// \code /// cxxRecordDecl( /// optionally(has( /// fieldDecl(hasName("bar")).bind("var") /// ))).bind("record") /// \endcode /// will produce a result binding for both "record" and "var". /// The matcher will produce a "record" binding for even if there is no data /// member named "bar" in that class. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::BindableMatcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///     hasAnyName(a, b, c)
/// \endcode
///  is equivalent to, but faster than
/// \code
///     anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
                                        internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullNameString);
}

/// Matches overloaded operator names.
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>( {std::string(Name)}); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// hasAnyOverloadedOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>, StringRef, internal::hasAnyOverloadedOperatorNameFunc> hasAnyOverloadedOperatorName; /// Matches template-dependent, but known, member names. /// /// In template declarations, dependent members are not resolved and so can /// not be matched to particular named declarations. /// /// This matcher allows to match on the known name of members. 
/// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()` AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) { return Node.getMember().getAsString() == N; } /// Matches template-dependent, but known, member names against an already-bound /// node /// /// In template declarations, dependent members are not resolved and so can /// not be matched to particular named declarations. /// /// This matcher allows to match on the name of already-bound VarDecl, FieldDecl /// and CXXMethodDecl nodes. /// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// The matcher /// @code /// \c cxxDependentScopeMemberExpr( /// hasObjectExpression(declRefExpr(hasType(templateSpecializationType( /// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has( /// cxxMethodDecl(hasName("mem")).bind("templMem") /// ))))) /// )))), /// memberHasSameNameAsBoundNode("templMem") /// ) /// @endcode /// first matches and binds the @c mem member of the @c S template, then /// compares its name to the usage in @c s.mem() in the @c x function template AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode, std::string, BindingID) { auto MemberName = Node.getMember().getAsString(); return Builder->removeBindings( [this, MemberName](const BoundNodesMap &Nodes) { const auto &BN = Nodes.getNode(this->BindingID); if (const auto *ND = BN.get<NamedDecl>()) { if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND)) return true; return ND->getName() != MemberName; } return true; }); } /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. 
/// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. 
/// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a declaration.
  if (BaseName.empty())
    return false;

  const auto M = isSameOrDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);

  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}

/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a declaration.
  if (BaseName.empty())
    return false;
  const auto M = isDirectlyDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Match against a copy of the builder so that bindings made by a method
  // that is later rejected (e.g. an implicit one under the default
  // traversal mode) do not leak into the caller's bindings.
  BoundNodesTreeBuilder Result(*Builder);
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                            Node.method_end(), Finder, &Result);
  if (MatchIt == Node.method_end())
    return false;

  if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit())
    return false;
  *Builder = std::move(Result);
  return true;
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
/// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. 
/// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. 
/// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) { std::string SelectorString = Node.getSelector().getAsString(); return RegExp->match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Example matches class Derived /// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base")))))) /// \code /// class Base {}; /// class Derived : Base {}; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>, /// Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  // The canonical type has all sugar (typedefs, etc.) removed.
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Unlike `on`, no IgnoreParenImpCasts() here: the argument is matched
  // exactly as it appears in the AST.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // The implicit object argument may be the object itself (dot call) or a
  // pointer to it (arrow call); accept either form.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // getFoundDecl() is the declaration found by name lookup; it is a
  // UsingShadowDecl exactly when the reference goes through a using decl.
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder) != Node.decls_end(); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N) { unsigned NumArgs = Node.getNumArgs(); if (!Finder->isTraversalIgnoringImplicitNodes()) return NumArgs == N; while (NumArgs) { if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1))) break; --NumArgs; } return NumArgs == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { if (N >= Node.getNumArgs()) return false; const Expr *Arg = Node.getArg(N); if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) return false; return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. 
/// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N; } /// Matches the n'th declaration of a declaration statement. /// /// Note that this does not work for global declarations because the AST /// breaks up multiple-declaration DeclStmt's into multiple single-declaration /// DeclStmt's. /// Example: Given non-global declarations /// \code /// int a, b = 0; /// int c; /// int d = 2, e; /// \endcode /// declStmt(containsDeclaration( /// 0, varDecl(hasInitializer(anything())))) /// matches only 'int d = 2, e;', and /// declStmt(containsDeclaration(1, varDecl())) /// \code /// matches 'int a, b = 0' as well as 'int d = 2, e;' /// but 'int c;' is not matched. /// \endcode AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, internal::Matcher<Decl>, InnerMatcher) { const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); if (N >= NumDecls) return false; DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); std::advance(Iterator, N); return InnerMatcher.matches(**Iterator, Finder, Builder); } /// Matches a C++ catch statement that has a catch-all handler. /// /// Given /// \code /// try { /// // ... /// } catch (int) { /// // ... /// } catch (...) { /// // ... /// } /// \endcode /// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int). AST_MATCHER(CXXCatchStmt, isCatchAll) { return Node.getExceptionDecl() == nullptr; } /// Matches a constructor initializer. 
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); if (MatchIt == Node.init_end()) return false; return (*MatchIt)->isWritten() || !Finder->isTraversalIgnoringImplicitNodes(); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) break; BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. /// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. 
///
/// Given
/// \code
/// void foo() {
///   struct point { double x; double y; };
///   point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}

/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // hasParameter bounds-checks ParamIndex, so running past the callee's
      // parameter list (variadics) simply fails to match.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// void (*f_ptr)(int) = f;
/// f_ptr(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParamType(
///     declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
///   matches f(y) and f_ptr(y)
/// with declRefExpr(...)
///   matching int y
/// and qualType(...)
///   matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  const FunctionProtoType *FProto = nullptr;
  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();
      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();
      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be member-pointer if its a memberfunctionpointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      // NOTE(review): ParamIndex is not checked against FProto's parameter
      // count before getParamType; variadic calls could index past the
      // prototype's parameters -- confirm against upstream behavior.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
/// /// /// Given /// /// \code /// void f(int a, int b, int c) { /// } /// \endcode /// /// ``parmVarDecl(isAtPosition(0))`` matches ``int a``. /// /// ``parmVarDecl(isAtPosition(1))`` matches ``int b``. AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) { const clang::DeclContext *Context = Node.getParentFunctionOrMethod(); if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; return false; } /// Matches any parameter of a function or an ObjC method declaration or a /// block. /// /// Does not match the 'this' parameter of a method. /// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder) != Node.param_end(); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. 
/// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. /// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. 
/// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches weak function declarations. /// /// Given: /// \code /// void foo() __attribute__((__weakref__("__foo"))); /// void bar(); /// \endcode /// functionDecl(isWeak()) /// matches the weak declaration "foo", but not "bar". AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. 
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Only declarations with a prototype can carry an exception specification.
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}

/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);

  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;

  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;

  return FnTy->isNothrow();
}

/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}

/// Matches selection statements with initializer.
/// /// Given: /// \code /// void foo() { /// if (int i = foobar(); i > 0) {} /// switch (int i = foobar(); i) {} /// for (auto& a = get_range(); auto& x : a) {} /// } /// void bar() { /// if (foobar() > 0) {} /// switch (foobar()) {} /// for (auto& x : get_range()) {} /// } /// \endcode /// ifStmt(hasInitStatement(anything())) /// matches the if statement in foo but not in bar. /// switchStmt(hasInitStatement(anything())) /// matches the switch statement in foo but not in bar. /// cxxForRangeStmt(hasInitStatement(anything())) /// matches the range for statement in foo but not in bar. AST_POLYMORPHIC_MATCHER_P(hasInitStatement, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt, CXXForRangeStmt), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *Init = Node.getInit(); return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. 
/// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. 
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  const DeclStmt* const DeclarationStatement =
    Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}

/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body. Note that in case of functions
/// this matcher only matches the definition itself and not the other
/// declarations of the same function.
/// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' /// /// Given /// \code /// void f(); /// void f() {} /// \endcode /// hasBody(functionDecl()) /// matches 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void f();' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node)) return false; const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches a function declaration that has a given body present in the AST. /// Note that this matcher matches all the declarations of a function whose /// body is present in the AST. /// /// Given /// \code /// void f(); /// void f() {} /// void g(); /// \endcode /// hasAnyBody(functionDecl()) /// matches both 'void f();' /// and 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void g();' AST_MATCHER_P(FunctionDecl, hasAnyBody, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = Node.getBody(); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. 
/// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder) != CS->body_end(); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. /// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. 
/// Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher, ValueT>(Value);
}

// Overloads below disambiguate bool/unsigned/double arguments; the trailing
// integer parameter is the overload-resolution id required by the macro.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       FloatingLiteral, IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare against the textual spelling of the opcode (e.g. "||", "!").
  return Name == Node.getOpcodeStr(Node.getOpcode());
}

/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
/// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasAnyOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches comparison operators. /// /// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 < s2 /// (matcher = cxxOperatorCallExpr(isComparisonOperator())) /// \code /// struct S { bool operator<(const S& other); }; /// void x(S s1, S s2) { bool b1 = s1 < s2; } /// \endcode AST_POLYMORPHIC_MATCHER(isComparisonOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isComparisonOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. 
/// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if both matchers match with opposite sides of the binary operator. /// /// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1), /// integerLiteral(equals(2))) /// \code /// 1 + 2 // Match /// 2 + 1 // Match /// 1 + 1 // No match /// 2 + 2 // No match /// \endcode inline internal::Matcher<BinaryOperator> hasOperands(const internal::Matcher<Expr> &Matcher1, const internal::Matcher<Expr> &Matcher2) { return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)), allOf(hasLHS(Matcher2), hasRHS(Matcher1))); } /// Matches if the operand of a unary operator matches. /// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. 
/// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches TagDecl object that are spelled with "struct." /// /// Example matches S, but not C, U or E. 
/// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); } /// Matches TagDecl object that are spelled with "union." /// /// Example matches U, but not C, S or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); } /// Matches TagDecl object that are spelled with "class." /// /// Example matches C, but not S, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isClass) { return Node.isClass(); } /// Matches TagDecl object that are spelled with "enum." /// /// Example matches E, but not C, S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. 
/// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. 
/// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches declarations of virtual methods and C++ base specifers that specify /// virtual inheritance. /// /// Example: /// \code /// class A { /// public: /// virtual void x(); // matches x /// }; /// \endcode /// /// Example: /// \code /// class Base {}; /// class DirectlyDerived : virtual Base {}; // matches Base /// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base /// \endcode /// /// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER(isVirtual, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl, CXXBaseSpecifier)) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. 
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}

/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Either the semantic fact (overridden_methods non-empty) or an explicit
  // `override` attribute qualifies.
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}

/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}

/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  return Node.isArrow();
}

/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  // operator-> on QualType dereferences to the underlying Type.
  return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
/// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". 
AST_MATCHER(QualType, isConstQualified) {
  // Dot access stays on QualType, so only top-level qualifiers are seen
  // (contrast with Node->..., which inspects the pointee Type).
  return Node.isConstQualified();
}

/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}

/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // getMemberDecl() is the referenced field/method declaration.
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
/// /// Given /// \code /// struct X { /// int m; /// int f(X x) { x.m; return m; } /// }; /// \endcode /// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m`, but not `m`; however, /// memberExpr(hasObjectExpression(hasType(pointsTo( // cxxRecordDecl(hasName("X")))))) /// matches `m` (aka. `this->m`), but not `x.m`. AST_POLYMORPHIC_MATCHER_P( hasObjectExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr), internal::Matcher<Expr>, InnerMatcher) { if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; return InnerMatcher.matches(*Node.getBase(), Finder, Builder); } /// Matches any using shadow declaration. /// /// Given /// \code /// namespace X { void b(); } /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasName("b")))) /// matches \code using X::b \endcode AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(), Node.shadow_end(), Finder, Builder) != Node.shadow_end(); } /// Matches a using shadow declaration where the target declaration is /// matched by the given matcher. /// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. 
/// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. 
/// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. 
/// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. 
/// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. 
/// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. 
/// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". 
extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. 
/// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches types nodes representing unary type transformations. 
/// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. 
/// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. 
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whoes decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. 
/// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { if (Finder->isTraversalIgnoringImplicitNodes() && !I->isWritten()) continue; BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. 
/// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. 
if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER(Expr, nullPointerConstant) { return Node.isNullPointerConstant(Finder->getASTContext(), Expr::NPC_ValueDependentIsNull); } /// Matches declaration of the function the statement belongs to /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does not match 'return v > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while(!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for(const auto &Parent: Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches a declaration that has external formal linkage. /// /// Example matches only z (matcher = varDecl(hasExternalFormalLinkage())) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode /// /// Example matches f() because it has external formal linkage despite being /// unique to the translation unit as though it has internal likage /// (matcher = functionDecl(hasExternalFormalLinkage())) /// /// \code /// namespace { /// void f() {} /// } /// \endcode AST_MATCHER(NamedDecl, hasExternalFormalLinkage) { return Node.hasExternalFormalLinkage(); } /// Matches a declaration that has default arguments. 
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode /// /// Deprecated. Use hasInitializer() instead to be able to /// match on the contents of the default argument. For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. 
/// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. 
/// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. 
/// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder) != Clauses.end(); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and /// ``default(firstprivate)`` extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. 
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind /// specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isFirstPrivateKind())`` matches only /// ``default(firstprivate)``. AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. 
e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
convolution_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if defined(__ARM_NEON)

// 2x2 convolution, stride 1, NEON-accelerated.
// Output is seeded with the per-channel bias and then accumulated over input
// channels, two channels per pass (plus a single-channel tail).
// Weight layout appears to be outch x inch x 4 floats, the 4 floats being the
// 2x2 kernel in row-major order (indexing is p*inch*4 + q*4).
// Assumes outw == w-1 and outh == h-1 (valid 2x2/s1 convolution) -- the +1
// row-end pointer bumps below depend on this; TODO confirm against caller.
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
//     int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        // seed the whole output channel with the bias before accumulation
        out.fill(bias0);

        int q = 0;

        // main loop: accumulate two input channels per pass
        for (; q+1<inch; q+=2)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);

            // 4 weights (2x2) per input channel
            const float* kernel0 = kernel + p*inch*4 + q*4;
            const float* kernel1 = kernel0 + 4;

            // two consecutive input rows per channel (kernel height 2)
            const float* r00 = img0;
            const float* r01 = img0 + w;
            const float* r10 = img1;
            const float* r11 = img1 + w;

#if defined(__ARM_NEON)
            float32x4_t _k0 = vld1q_f32(kernel0);
            float32x4_t _k1 = vld1q_f32(kernel1);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if defined(__ARM_NEON)
                int nn = outw >> 2;    // 4-wide vector iterations
                int remain = outw & 3; // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if defined(__ARM_NEON)
#if __aarch64__
                // aarch64: intrinsics; each iteration produces 4 outputs.
                for (; nn>0; nn--)
                {
                    // unshifted and by-one-shifted loads give the two kernel
                    // columns for each row
                    float32x4_t _r000 = vld1q_f32(r00);
                    float32x4_t _r010 = vld1q_f32(r01);
                    float32x4_t _r001 = vld1q_f32(r00 + 1);
                    float32x4_t _r011 = vld1q_f32(r01 + 1);

                    float32x4_t _r100 = vld1q_f32(r10);
                    float32x4_t _r110 = vld1q_f32(r11);
                    float32x4_t _r101 = vld1q_f32(r10 + 1);
                    float32x4_t _r111 = vld1q_f32(r11 + 1);

                    // accumulate onto the existing output values
                    float32x4_t _sum = vld1q_f32(outptr);

                    _sum = vmlaq_lane_f32(_sum, _r000, vget_low_f32(_k0), 0);
                    _sum = vmlaq_lane_f32(_sum, _r001, vget_low_f32(_k0), 1);
                    _sum = vmlaq_lane_f32(_sum, _r010, vget_high_f32(_k0), 0);
                    _sum = vmlaq_lane_f32(_sum, _r011, vget_high_f32(_k0), 1);

                    _sum = vmlaq_lane_f32(_sum, _r100, vget_low_f32(_k1), 0);
                    _sum = vmlaq_lane_f32(_sum, _r101, vget_low_f32(_k1), 1);
                    _sum = vmlaq_lane_f32(_sum, _r110, vget_high_f32(_k1), 0);
                    _sum = vmlaq_lane_f32(_sum, _r111, vget_high_f32(_k1), 1);

                    vst1q_f32(outptr, _sum);

                    r00 += 4;
                    r01 += 4;
                    r10 += 4;
                    r11 += 4;
                    outptr += 4;
                }
#else
                // armv7: hand-written assembly; same math as the aarch64
                // intrinsics path, software-pipelined (next vectors are loaded
                // while the current ones are consumed; vext builds the
                // shifted-by-one columns).
                if (nn > 0)
                {
                asm volatile(
                    "pld [%1, #128] \n"
                    "vld1.f32 {d0-d1}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d4-d5}, [%2]! \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d24-d25}, [%3]! \n"
                    "pld [%4, #128] \n"
                    "vld1.f32 {d28-d29}, [%4]! \n"
                    "0: \n"
                    "pld [%5, #128] \n"
                    "vld1.f32 {d18-d19}, [%5] \n"// q9 = sum
                    "vmul.f32 q8, q0, %e12[0] \n"
                    "vmla.f32 q9, q2, %f12[0] \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d2-d3}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d6-d7}, [%2]! \n"
                    "vext.f32 q10, q0, q1, #1 \n"
                    "vext.f32 q11, q2, q3, #1 \n"
                    "vmla.f32 q8, q12, %e13[0] \n"
                    "vmla.f32 q9, q14, %f13[0] \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d26-d27}, [%3]! \n"
                    "pld [%4, #128] \n"
                    "vld1.f32 {d30-d31}, [%4]! \n"
                    "vmla.f32 q8, q10, %e12[1] \n"
                    "vmla.f32 q9, q11, %f12[1] \n"
                    "vext.f32 q10, q12, q13, #1 \n"
                    "vext.f32 q11, q14, q15, #1 \n"
                    "vmla.f32 q8, q10, %e13[1] \n"
                    "vmla.f32 q9, q11, %f13[1] \n"
                    "vorr q0, q1, q1 \n"
                    "vorr q2, q3, q3 \n"
                    "vadd.f32 q8, q8, q9 \n"
                    "vorr q12, q13, q13 \n"
                    "vorr q14, q15, q15 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d16-d17}, [%5]! \n"
                    "bne 0b \n"
                    "sub %1, #16 \n"
                    "sub %2, #16 \n"
                    "sub %3, #16 \n"
                    "sub %4, #16 \n"
                    : "=r"(nn), // %0
                      "=r"(r00), // %1
                      "=r"(r01), // %2
                      "=r"(r10), // %3
                      "=r"(r11), // %4
                      "=r"(outptr) // %5
                    : "0"(nn),
                      "1"(r00),
                      "2"(r01),
                      "3"(r10),
                      "4"(r11),
                      "5"(outptr),
                      "w"(_k0), // %12
                      "w"(_k1) // %13
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one output element per iteration
                for (; remain>0; remain--)
                {
#if defined(__ARM_NEON)
                    // pack (r00[0..1], r01[0..1]) and multiply by the four
                    // weights, then horizontally sum the four products
                    float32x2_t _r00 = vld1_f32(r00);
                    float32x2_t _r01 = vld1_f32(r01);
                    float32x4_t _r00r1 = vcombine_f32(_r00, _r01);
                    float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);

                    float32x2_t _r10 = vld1_f32(r10);
                    float32x2_t _r11 = vld1_f32(r11);
                    float32x4_t _r10r1 = vcombine_f32(_r10, _r11);
                    _s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);

                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);

                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;

                    sum += r00[0] * kernel0[0];
                    sum += r00[1] * kernel0[1];
                    sum += r01[0] * kernel0[2];
                    sum += r01[1] * kernel0[3];

                    sum += r10[0] * kernel1[0];
                    sum += r10[1] * kernel1[1];
                    sum += r11[0] * kernel1[2];
                    sum += r11[1] * kernel1[3];

                    *outptr += sum;
#endif // __ARM_NEON
                    r00 += 1;
                    r01 += 1;
                    r10 += 1;
                    r11 += 1;
                    outptr++;
                }

                // advance to the next input row, skipping the last column
                // (kernel width 2 => outw == w-1)
                r00 += 1;
                r01 += 1;
                r10 += 1;
                r11 += 1;
            }
        }

        // tail: one remaining input channel
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*4 + q*4;

            const float* r0 = img0;
            const float* r1 = img0 + w;

#if defined(__ARM_NEON)
            // broadcast each of the four 2x2 weights into its own vector
            float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
            float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
            float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
            float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if defined(__ARM_NEON)
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if defined(__ARM_NEON)
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r01 = vld1q_f32(r0 + 1);
                    float32x4_t _r11 = vld1q_f32(r1 + 1);

                    // two accumulators to shorten the dependency chain
                    float32x4_t _sum = vld1q_f32(outptr);
                    float32x4_t _sum2;
                    _sum = vmlaq_f32(_sum, _r00, _k0);
                    _sum2 = vmulq_f32(_r01, _k1);
                    _sum = vmlaq_f32(_sum, _r10, _k2);
                    _sum2 = vmlaq_f32(_sum2, _r11, _k3);
                    _sum = vaddq_f32(_sum, _sum2);

                    vst1q_f32(outptr, _sum);

                    r0 += 4;
                    r1 += 4;
                    outptr += 4;
                }
#else
                // armv7 assembly, single-channel variant of the loop above
                if (nn > 0)
                {
                asm volatile(
                    "pld [%1, #128] \n"
                    "vld1.f32 {d0-d1}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d4-d5}, [%2]! \n"
                    "0: \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d18-d19}, [%3] \n"// q9 = sum
                    "vmul.f32 q8, q0, %q8 \n"
                    "vmla.f32 q9, q2, %q10 \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d2-d3}, [%1]! \n"
                    "vext.f32 q10, q0, q1, #1 \n"
                    "vmla.f32 q8, q10, %q9 \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d6-d7}, [%2]! \n"
                    "vext.f32 q11, q2, q3, #1 \n"
                    "vmla.f32 q9, q11, %q11 \n"
                    "vorr q0, q1, q1 \n"
                    "vadd.f32 q8, q8, q9 \n"
                    "vorr q2, q3, q3 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d16-d17}, [%3]! \n"
                    "bne 0b \n"
                    "sub %1, #16 \n"
                    "sub %2, #16 \n"
                    : "=r"(nn), // %0
                      "=r"(r0), // %1
                      "=r"(r1), // %2
                      "=r"(outptr) // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr),
                      "w"(_k0), // %8
                      "w"(_k1), // %9
                      "w"(_k2), // %10
                      "w"(_k3) // %11
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
#if defined(__ARM_NEON)
                // all four weights in one vector for the scalar tail
                float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
                for (; remain>0; remain--)
                {
#if defined(__ARM_NEON)
                    float32x2_t _r0 = vld1_f32(r0);
                    float32x2_t _r1 = vld1_f32(r1);
                    float32x4_t _r0r1 = vcombine_f32(_r0, _r1);
                    float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);

                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;

                    sum += r0[0] * kernel0[0];
                    sum += r0[1] * kernel0[1];
                    sum += r1[0] * kernel0[2];
                    sum += r1[1] * kernel0[3];

                    *outptr += sum;
#endif
                    r0 += 1;
                    r1 += 1;
                    outptr++;
                }

                // next input row, skipping the last column
                r0 += 1;
                r1 += 1;
            }
        }
    }
}

#endif // __ARM_NEON
matrix.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M AAA TTTTT RRRR IIIII X X % % MM MM A A T R R I X X % % M M M AAAAA T RRRR I X % % M M A A T R R I X X % % M M A A T R R IIIII X X % % % % % % MagickCore Matrix Methods % % % % Software Design % % Cristy % % August 2007 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" /* Typedef declaration. 
*/

/*
  A matrix cache backed by heap memory, an anonymous memory map, or a disk
  scratch file (see AcquireMatrixInfo() for the selection policy).
*/
struct _MatrixInfo
{
  CacheType
    type;                       /* MemoryCache, MapCache, or DiskCache */

  size_t
    columns,
    rows,
    stride;                     /* bytes per matrix element */

  MagickSizeType
    length;                     /* total cache size: columns*rows*stride */

  MagickBooleanType
    mapped,                     /* MagickTrue if elements came from MapBlob() */
    synchronize;                /* honor MAGICK_SYNCHRONIZE environment */

  char
    path[MagickPathExtent];     /* scratch-file path for disk-backed caches */

  int
    file;                       /* scratch-file descriptor, or -1 */

  void
    *elements;                  /* element storage (NULL for DiskCache) */

  SemaphoreInfo
    *semaphore;                 /* serializes non-pread/pwrite file access */

  size_t
    signature;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e M a t r i x I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireMatrixInfo() allocates the MatrixInfo structure.
%
%  The format of the AcquireMatrixInfo method is:
%
%      MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
%        const size_t stride,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: the matrix columns.
%
%    o rows: the matrix rows.
%
%    o stride: the matrix stride.
%
%    o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/*
  SIGBUS is delivered when a mapped page cannot be materialized (e.g. the
  filesystem backing the map fills up); treat it as a fatal cache error.
*/
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif

/*
  Write `length' bytes from `buffer' to the matrix scratch file starting at
  byte `offset'.  Short writes and EINTR are retried.  Returns the number of
  bytes actually written, or -1 on a seek error.  Without pwrite() the
  lseek()+write() pair is serialized with the cache semaphore.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: return bytes written so far */
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Grow the matrix scratch file to `length' bytes by writing a single byte at
  the end (or posix_fallocate() when synchronized I/O was requested).  Also
  installs the SIGBUS handler so a failed page-in of the map is fatal rather
  than undefined.  Returns MagickTrue on success.
*/
static MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not fit in a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);  /* already large enough */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}

MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  matrix_info=(MatrixInfo *) AcquireQuantumMemory(1,sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AcquireSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Overflow check on columns*rows*stride.
    NOTE(review): divides by rows and stride -- assumes both are non-zero;
    confirm callers never pass 0.
  */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /*
    Prefer heap memory, fall back to an anonymous map, then to a disk
    scratch file (optionally memory-mapped).
  */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              /* heap failed: try an anonymous memory map */
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      /* no memory available: spill the cache to a disk scratch file */
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          /* map the scratch file when possible for direct element access */
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e M a g i c k M a t r i x                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireMagickMatrix() allocates and returns a matrix in the form of an
%  array of pointers to an array of doubles, with all values pre-set to zero.
%
%  This is used to generate the two dimensional matrix, and vectors required
%  for the GaussJordanElimination() method below, solving some system of
%  simultaneous equations.
%
%  The format of the AcquireMagickMatrix method is:
%
%      double **AcquireMagickMatrix(const size_t number_rows,
%        const size_t size)
%
%  A description of each parameter follows:
%
%    o number_rows: the number pointers for the array of pointers
%      (first dimension).
%
%    o size: the size of the array of doubles each pointer points to
%      (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **matrix;

  register ssize_t
    row;

  /*
    Allocate the row-pointer array, then each row, zero-filling as we go.
  */
  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (row=0; row < (ssize_t) number_rows; row++)
  {
    register ssize_t
      column;

    matrix[row]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[row]));
    if (matrix[row] == (double *) NULL)
      {
        /*
          Allocation failed mid-way: unwind every row acquired so far and
          the pointer array itself before giving up.
        */
        while (row > 0)
        {
          row--;
          matrix[row]=(double *) RelinquishMagickMemory(matrix[row]);
        }
        matrix=(double **) RelinquishMagickMemory(matrix);
        return((double **) NULL);
      }
    for (column=0; column < (ssize_t) size; column++)
      matrix[row][column]=0.0;
  }
  return(matrix);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y M a t r i x I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
%  with the matrix.
%
%  The format of the DestroyImage method is:
%
%      MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      /* heap- or anonymous-map-backed: release the element storage */
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
      /* fall through: a map cache is also backed by a disk scratch file */
    }
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  RelinquishSemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G a u s s J o r d a n E l i m i n a t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussJordanElimination() returns a matrix in reduced row echelon form,
%  while simultaneously reducing and thus solving the augmented results
%  matrix.
%
%  See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
%  The format of the GaussJordanElimination method is:
%
%      MagickBooleanType GaussJordanElimination(double **matrix,
%        double **vectors,const size_t rank,const size_t number_vectors)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
%    o vectors: the additional matrix augmenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
%    o rank: The size of the matrix (both rows and columns).
%      Also represents the number of terms that need to be solved.
%
%    o number_vectors: Number of vector columns, augmenting the above matrix.
%      Usually 1, but can be more for more complex equation solving.
%
%  Note that the 'matrix' is given as an 'array of row pointers' of rank size.
%  That is, values can be assigned as matrix[row][column] where 'row' is
%  typically the equation, and 'column' is the term of the equation.
%  That is, the matrix is in the form of a 'row first array'.
%
%  However 'vectors' is an 'array of column pointers' which can have any number
%  of columns, with each column array the same 'rank' size as 'matrix'.
%
%  This allows for simpler handling of the results, especially if only one
%  column 'vector' is all that is required to produce the desired solution.
%
%  For example, the 'vectors' can consist of a pointer to a simple array of
%  doubles, when only one set of simultaneous equations is to be solved from
%  the given set of coefficient weighted terms.
%
%     double **matrix = AcquireMagickMatrix(8UL,8UL);
%     double coefficients[8];
%     ...
%     GaussJordanElimination(matrix, &coefficients, 8UL, 1UL);
%
%  However by specifying more 'columns' (as an 'array of vector columns'),
%  you can use this function to solve a set of 'separable' equations.
%
%  For example a distortion function where u = U(x,y) and v = V(x,y),
%  and the functions U() and V() have separate coefficients, but are being
%  generated from a common x,y -> u,v data set.
%
%  Another example is generation of a color gradient from a set of colors at
%  specific coordinates, such as a list x,y -> r,g,b,a.
%
%  You can also use the 'vectors' to generate an inverse of the given 'matrix'
%  though as a 'column first array' rather than a 'row first array'.
For
%  details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
/* swap two doubles without a temporary (no-op when aliased) */
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  register ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  /* bookkeeping for full pivoting: which rows/columns were swapped */
  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Full pivot: pick the largest-magnitude element among the rows and
      columns not yet used as pivots.
      NOTE(review): the early MagickFalse returns below leak columns/rows/
      pivots -- confirm against upstream before changing.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  return(MagickFalse);
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /* move the pivot onto the diagonal */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      return(MagickFalse);  /* singularity */
    /* normalize the pivot row, then eliminate the column elsewhere */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /* undo the column permutations introduced by full pivoting */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t M a t r i x C o l u m n s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMatrixColumns() returns the number of columns in the matrix.
%
%  The format of the GetMatrixColumns method is:
%
%      size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->columns);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t M a t r i x E l e m e n t                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMatrixElement() returns the specified element in the matrix.
%
%  The format of the GetMatrixElement method is:
%
%      MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
%        const ssize_t x,const ssize_t y,void *value)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix columns.
%
%    o x: the matrix x-offset.
%
%    o y: the matrix y-offset.
%
%    o value: return the matrix element in this buffer.
%
*/

/* Clamp x into [0, columns-1] (replicate-edge addressing). */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/* Clamp y into [0, rows-1] (replicate-edge addressing). */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/*
  Read `length' bytes at byte `offset' from the matrix scratch file into
  `buffer'.  Mirrors WriteMatrixElements(): short reads and EINTR are
  retried, and without pread() the lseek()+read() pair is serialized with
  the cache semaphore.  Returns bytes read, or -1 on a seek error.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: stop and return bytes read so far */
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}

MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* linear element index; out-of-range coordinates clamp to the edge */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      /* memory- or map-backed cache: copy straight out of the buffer */
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t M a t r i x R o w s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMatrixRows() returns the number of rows in the matrix.
%
%  The format of the GetMatrixRows method is:
%
%      size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   L e a s t S q u a r e s A d d T e r m s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LeastSquaresAddTerms() adds one set of terms and associate results to the
%  given matrix and vectors for solving using least-squares function fitting.
%
%  The format of the AcquireMagickMatrix method is:
%
%      void LeastSquaresAddTerms(double **matrix,double **vectors,
%        const double *terms,const double *results,const size_t rank,
%        const size_t number_vectors);
%
%  A description of each parameter follows:
%
%    o matrix: the square matrix to add given terms/results to.
%
%    o vectors: the result vectors to add terms/results to.
%
%    o terms: the pre-calculated terms (without the unknown coefficient
%      weights) that forms the equation being added.
%
%    o results: the result(s) that should be generated from the given terms
%      weighted by the yet-to-be-solved coefficients.
%
%    o rank: the rank or size of the dimensions of the square matrix.
%      Also the length of vectors, and number of terms being added.
%
%    o number_vectors: Number of result vectors, and number or results being
%      added.  Also represents the number of separable systems of equations
%      that is being solved.
%
%  Example of use...
% % 2 dimensional Affine Equations (which are separable) % c0*x + c2*y + c4*1 => u % c1*x + c3*y + c5*1 => v % % double **matrix = AcquireMagickMatrix(3UL,3UL); % double **vectors = AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... % for each given x,y -> u,v % terms[0] = x; % terms[1] = y; % terms[2] = 1; % results[0] = u; % results[1] = v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % c5 = vectors[1][2]; % } % else % printf("Matrix unsolvable\n"); % RelinquishMagickMatrix(matrix,3UL); % RelinquishMagickMatrix(vectors,2UL); % */ MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors, const double *terms,const double *results,const size_t rank, const size_t number_vectors) { register ssize_t i, j; for (j=0; j < (ssize_t) rank; j++) { for (i=0; i < (ssize_t) rank; i++) matrix[i][j]+=terms[i]*terms[j]; for (i=0; i < (ssize_t) number_vectors; i++) vectors[i][j]+=results[i]*terms[j]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a t r i x T o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MatrixToImage() returns a matrix as an image. The matrix elements must be % of type double otherwise nonsense is returned. % % The format of the MatrixToImage method is: % % Image *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o matrix_info: the matrix. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info, ExceptionInfo *exception) { CacheView *image_view; double max_value, min_value, scale_factor; Image *image; MagickBooleanType status; ssize_t y; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (matrix_info->stride < sizeof(double)) return((Image *) NULL); /* Determine range of matrix. */ (void) GetMatrixElement(matrix_info,0,0,&min_value); max_value=min_value; for (y=0; y < (ssize_t) matrix_info->rows; y++) { register ssize_t x; for (x=0; x < (ssize_t) matrix_info->columns; x++) { double value; if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse) continue; if (value < min_value) min_value=value; else if (value > max_value) max_value=value; } } if ((min_value == 0.0) && (max_value == 0.0)) scale_factor=0; else if (min_value == max_value) { scale_factor=(double) QuantumRange/min_value; min_value=0; } else scale_factor=(double) QuantumRange/(max_value-min_value); /* Convert matrix to image. 
*/ image=AcquireImage((ImageInfo *) NULL,exception); image->columns=matrix_info->columns; image->rows=matrix_info->rows; image->colorspace=GRAYColorspace; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double value; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse) continue; value=scale_factor*(value-min_value); *q=ClampToQuantum(value); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N u l l M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NullMatrix() sets all elements of the matrix to zero. % % The format of the memset method is: % % MagickBooleanType *NullMatrix(MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. 
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  MagickOffsetType
    i;

  ssize_t
    count;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      /* memory-resident matrix: length is the total size in bytes */
      (void) memset(matrix_info->elements,0,(size_t) matrix_info->length);
      return(MagickTrue);
    }
  /*
    Disk cache: zero exactly 'length' bytes starting at offset 0.  The
    previous implementation wrote 'length' bytes once per row, i.e.
    rows*length bytes in total, even though 'length' already covers the
    whole matrix (as the memset branch above shows).
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (i=0; i < (MagickOffsetType) matrix_info->length; i++)
  {
    count=write(matrix_info->file,&value,sizeof(value));
    if (count != (ssize_t) sizeof(value))
      break;  /* short write: report failure below */
  }
  return(i < (MagickOffsetType) matrix_info->length ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e l i n q u i s h M a g i c k M a t r i x                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RelinquishMagickMatrix() frees the previously acquired matrix (array of
%  pointers to arrays of doubles).
%
%  The format of the RelinquishMagickMatrix method is:
%
%      double **RelinquishMagickMatrix(double **matrix,
%        const size_t number_rows)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to relinquish
%
%    o number_rows: the first dimension of the acquired matrix (number of
%      pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  if (matrix != (double **) NULL)
    {
      register ssize_t
        row;

      /*
        Release each row, then the array of row pointers itself.
      */
      for (row=0; row < (ssize_t) number_rows; row++)
        matrix[row]=(double *) RelinquishMagickMemory(matrix[row]);
      matrix=(double **) RelinquishMagickMemory(matrix);
    }
  return(matrix);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t M a t r i x E l e m e n t                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMatrixElement() sets the specified element in the matrix.
%
%  The format of the SetMatrixElement method is:
%
%      MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
%        const ssize_t x,const ssize_t y,const void *value)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix columns.
%
%    o x: the matrix x-offset.
%
%    o y: the matrix y-offset.
%
%    o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    index;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /*
    Linearize (x,y) into a row-major element index and bounds-check it
    against the backing store.
  */
  index=(MagickOffsetType) y*matrix_info->columns+x;
  if (index < 0)
    return(MagickFalse);
  if ((MagickSizeType) (index*matrix_info->stride) >= matrix_info->length)
    return(MagickFalse);
  if (matrix_info->type == DiskCache)
    {
      /*
        Disk-backed matrix: persist the element via the cache writer.
      */
      count=WriteMatrixElements(matrix_info,index*matrix_info->stride,
        matrix_info->stride,(unsigned char *) value);
      return(count == (MagickOffsetType) matrix_info->stride ? MagickTrue :
        MagickFalse);
    }
  /*
    Memory-resident matrix: copy the element directly into place.
  */
  (void) memcpy((unsigned char *) matrix_info->elements+index*
    matrix_info->stride,value,matrix_info->stride);
  return(MagickTrue);
}
program_evaluator.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2010, 2011, 2012 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: keir@google.com (Keir Mierle) // // The ProgramEvaluator runs the cost functions contained in each residual block // and stores the result into a jacobian. 
The particular type of jacobian is // abstracted out using two template parameters: // // - An "EvaluatePreparer" that is responsible for creating the array with // pointers to the jacobian blocks where the cost function evaluates to. // - A "JacobianWriter" that is responsible for storing the resulting // jacobian blocks in the passed sparse matrix. // // This abstraction affords an efficient evaluator implementation while still // supporting writing to multiple sparse matrix formats. For example, when the // ProgramEvaluator is parameterized for writing to block sparse matrices, the // residual jacobians are written directly into their final position in the // block sparse matrix by the user's CostFunction; there is no copying. // // The evaluation is threaded with OpenMP. // // The EvaluatePreparer and JacobianWriter interfaces are as follows: // // class EvaluatePreparer { // // Prepare the jacobians array for use as the destination of a call to // // a cost function's evaluate method. // void Prepare(const ResidualBlock* residual_block, // int residual_block_index, // SparseMatrix* jacobian, // double** jacobians); // } // // class JacobianWriter { // // Create a jacobian that this writer can write. Same as // // Evaluator::CreateJacobian. // SparseMatrix* CreateJacobian() const; // // // Create num_threads evaluate preparers. Caller owns result which must // // be freed with delete[]. Resulting preparers are valid while *this is. // EvaluatePreparer* CreateEvaluatePreparers(int num_threads); // // // Write the block jacobians from a residual block evaluation to the // // larger sparse jacobian. // void Write(int residual_id, // int residual_offset, // double** jacobians, // SparseMatrix* jacobian); // } // // Note: The ProgramEvaluator is not thread safe, since internally it maintains // some per-thread scratch space. 
#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_

#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"

namespace ceres {
namespace internal {

// Evaluates all residual blocks of a Program, writing residuals, gradient
// and jacobian through the pluggable EvaluatePreparer/JacobianWriter pair
// (see the file header comment for their contracts).
template<typename EvaluatePreparer, typename JacobianWriter>
class ProgramEvaluator : public Evaluator {
 public:
  // Owns neither 'options' nor 'program'; 'program' must outlive *this.
  ProgramEvaluator(const Evaluator::Options &options, Program* program)
      : options_(options),
        program_(program),
        jacobian_writer_(options, program),
        evaluate_preparers_(
            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
#ifndef CERES_USE_OPENMP
    CHECK_EQ(1, options_.num_threads)
        << "OpenMP support is not compiled into this binary; "
        << "only options.num_threads=1 is supported.";
#endif
    BuildResidualLayout(*program, &residual_layout_);
    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
                                                   options.num_threads));
  }

  // Implementation of Evaluator interface.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  // Evaluates cost (required), and residuals/gradient/jacobian when their
  // pointers are non-NULL.  Returns false if any residual block fails to
  // evaluate, in which case the outputs are not meaningful.
  bool Evaluate(const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }

    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }

    if (jacobian != NULL) {
      jacobian->SetZero();
    }

    // Each thread gets its own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
    }

    // This bool is used to disable the loop if an error is encountered
    // without breaking out of it. The remaining loop iterations are still run,
    // but with an empty body, and so will finish quickly.
    bool abort = false;
    int num_residual_blocks = program_->NumResidualBlocks();
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
      // Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
      if (abort) {
        continue;
      }

#ifdef CERES_USE_OPENMP
      int thread_id = omp_get_thread_num();
#else
      int thread_id = 0;
#endif
      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];

      // Prepare block residuals if requested.  When only the gradient is
      // wanted, residuals still have to be computed into scratch space.
      const ResidualBlock* residual_block = program_->residual_blocks()[i];
      double* block_residuals = NULL;
      if (residuals != NULL) {
        block_residuals = residuals + residual_layout_[i];
      } else if (gradient != NULL) {
        block_residuals = scratch->residual_block_residuals.get();
      }

      // Prepare block jacobians if requested (the gradient also needs them).
      double** block_jacobians = NULL;
      if (jacobian != NULL || gradient != NULL) {
        preparer->Prepare(residual_block,
                          i,
                          jacobian,
                          scratch->jacobian_block_ptrs.get());
        block_jacobians = scratch->jacobian_block_ptrs.get();
      }

      // Evaluate the cost, residuals, and jacobians.
      double block_cost;
      if (!residual_block->Evaluate(
              &block_cost,
              block_residuals,
              block_jacobians,
              scratch->residual_block_evaluate_scratch.get())) {
        abort = true;
        // This ensures that the OpenMP threads have a consistent view of
        // 'abort'. Do the flush inside the failure case so that there is
        // usually only one synchronization point per loop iteration instead
        // of two.
#pragma omp flush(abort)
        continue;
      }

      scratch->cost += block_cost;

      // Store the jacobians, if they were requested.
      if (jacobian != NULL) {
        jacobian_writer_.Write(i,
                               residual_layout_[i],
                               block_jacobians,
                               jacobian);
      }

      // Compute and store the gradient, if it was requested.
      if (gradient != NULL) {
        int num_residuals = residual_block->NumResiduals();
        int num_parameter_blocks = residual_block->NumParameterBlocks();
        for (int j = 0; j < num_parameter_blocks; ++j) {
          const ParameterBlock* parameter_block =
              residual_block->parameter_blocks()[j];
          if (parameter_block->IsConstant()) {
            continue;
          }
          // Accumulate J_j^T * r into this thread's gradient scratch at the
          // parameter block's offset in the delta (local) parameterization.
          MatrixRef block_jacobian(block_jacobians[j],
                                   num_residuals,
                                   parameter_block->LocalSize());
          VectorRef block_gradient(scratch->gradient.get() +
                                   parameter_block->delta_offset(),
                                   parameter_block->LocalSize());
          VectorRef block_residual(block_residuals, num_residuals);
          block_gradient += block_residual.transpose() * block_jacobian;
        }
      }
    }

    if (!abort) {
      // Sum the cost and gradient (if requested) from each thread.
      (*cost) = 0.0;
      int num_parameters = program_->NumEffectiveParameters();
      if (gradient != NULL) {
        VectorRef(gradient, num_parameters).setZero();
      }
      for (int i = 0; i < options_.num_threads; ++i) {
        (*cost) += evaluate_scratch_[i].cost;
        if (gradient != NULL) {
          VectorRef(gradient, num_parameters) +=
              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
        }
      }
    }
    return !abort;
  }

  bool Plus(const double* state,
            const double* delta,
            double* state_plus_delta) const {
    return program_->Plus(state, delta, state_plus_delta);
  }

  int NumParameters() const {
    return program_->NumParameters();
  }
  int NumEffectiveParameters() const {
    return program_->NumEffectiveParameters();
  }

  int NumResiduals() const {
    return program_->NumResiduals();
  }

 private:
  // Per-thread scratch space needed to evaluate and store each residual block.
struct EvaluateScratch { void Init(int max_parameters_per_residual_block, int max_scratch_doubles_needed_for_evaluate, int max_residuals_per_residual_block, int num_parameters) { residual_block_evaluate_scratch.reset( new double[max_scratch_doubles_needed_for_evaluate]); gradient.reset(new double[num_parameters]); VectorRef(gradient.get(), num_parameters).setZero(); residual_block_residuals.reset( new double[max_residuals_per_residual_block]); jacobian_block_ptrs.reset( new double*[max_parameters_per_residual_block]); } double cost; scoped_array<double> residual_block_evaluate_scratch; // The gradient in the local parameterization. scoped_array<double> gradient; // Enough space to store the residual for the largest residual block. scoped_array<double> residual_block_residuals; scoped_array<double*> jacobian_block_ptrs; }; static void BuildResidualLayout(const Program& program, vector<int>* residual_layout) { const vector<ResidualBlock*>& residual_blocks = program.residual_blocks(); residual_layout->resize(program.NumResidualBlocks()); int residual_pos = 0; for (int i = 0; i < residual_blocks.size(); ++i) { const int num_residuals = residual_blocks[i]->NumResiduals(); (*residual_layout)[i] = residual_pos; residual_pos += num_residuals; } } // Create scratch space for each thread evaluating the program. 
  // Create scratch space for each thread evaluating the program.  Caller
  // owns the returned array (stored in evaluate_scratch_, a scoped_array).
  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
                                                 int num_threads) {
    int max_parameters_per_residual_block =
        program.MaxParametersPerResidualBlock();
    int max_scratch_doubles_needed_for_evaluate =
        program.MaxScratchDoublesNeededForEvaluate();
    int max_residuals_per_residual_block =
        program.MaxResidualsPerResidualBlock();
    int num_parameters = program.NumEffectiveParameters();

    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
    for (int i = 0; i < num_threads; i++) {
      evaluate_scratch[i].Init(max_parameters_per_residual_block,
                               max_scratch_doubles_needed_for_evaluate,
                               max_residuals_per_residual_block,
                               num_parameters);
    }
    return evaluate_scratch;
  }

  Evaluator::Options options_;
  Program* program_;                              // not owned
  JacobianWriter jacobian_writer_;
  scoped_array<EvaluatePreparer> evaluate_preparers_;
  scoped_array<EvaluateScratch> evaluate_scratch_;
  // Offset of each residual block in the global residual vector.
  vector<int> residual_layout_;
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
clang-272521.c
#include <stdio.h>
#include <complex.h>

/* Offloads a parallel loop that accumulates a complex sum via an OpenMP
   reduction; expected output is C = 10.000000+10.000000i. */
int main()
{
  float complex C = 0 + 0 * I;

  /* The loop index is declared in the for-init, as OpenMP requires; the
     previous file-scope 'int i;' was unused and shadowed by it. */
#pragma omp target parallel for reduction(+:C) map(tofrom: C)
  for (int i = 0; i < 10; i++) {
    C += 1.0 + 1.0*I;
  }
  printf("C = %f+%fi\n", creal(C), cimag(C));
  return 0;
}
dataset.h
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_

#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/meta.h>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>

#include <vector>
#include <utility>
#include <functional>
#include <string>
#include <unordered_set>
#include <mutex>

namespace LightGBM {

/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta (non-feature) data for training data,
*        e.g. labels, weights, initial scores, query level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of the i-th query are in [query_boundaries[i], query_boundaries[i+1]).
* 4. Query Weights, auto-calculated from weights and query_boundaries (if both exist):
*    the weight of the i-th query is
*    sum(weights[query_boundaries[i]], .., weights[query_boundaries[i+1]-1])
*      / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If existing, the model will boost from this score,
*    otherwise it will start from 0.
*/
class Metadata {
 public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization will load query level information, since it is needed for sampling data
  * \param data_filename Filename of data
  * \param initscore_file Filename of initial score
  */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
  * \brief Init as a subset of another Metadata
  * \param metadata Source metadata to subset
  * \param used_indices Indices of the records to keep
  * \param num_used_indices Number of kept records
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initialize from binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
  * \brief Initial work, will allocate space for label, weight (if exists) and query (if exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means doesn't exist
  * \param query_idx Index of query id column, < 0 means doesn't exist
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of local used records
  */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
  * \brief Partition meta data according to local used indices if needed
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data,
                        const std::vector<data_size_t>& used_data_indices);

  void SetLabel(const label_t* label, data_size_t len);

  void SetWeights(const label_t* weights, data_size_t len);

  void SetQuery(const data_size_t* query, data_size_t len);

  /*!
  * \brief Set initial scores
  * \param init_score Initial scores, this class will manage memory for init_score.
  * \param len Number of initial scores
  */
  void SetInitScore(const double* init_score, data_size_t len);

  /*!
  * \brief Save binary data to file
  * \param writer File writer to write to
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;

  /*!
  * \brief Get sizes in byte of this object
  */
  size_t SizesInByte() const;

  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const label_t* label() const { return label_.data(); }

  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }

  /*!
  * \brief Set weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }

  /*!
  * \brief Set query id for one record
  * \param idx Index of this record
  * \param value Query id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    queries_[idx] = static_cast<data_size_t>(value);
  }

  /*!
  * \brief Get weights, if not exists, will return nullptr
  * \return Pointer of weights
  */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get ranks, if not exists, will return nullptr
  * \return Pointer of ranks
  */
  inline const size_t* ranks() const { ///
    if (!ranks_.empty()) {
      return ranks_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get prices, if not exists, will return nullptr
  * \return Pointer of prices
  */
  inline const size_t* prices() const { ///
    if (!prices_.empty()) {
      return prices_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get item feature scores, if not exists, will return nullptr
  * \return Pointer of item scores
  */
  inline const size_t* itemScores() const { ///
    if (!item_scores_.empty()) {
      return item_scores_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get data boundaries on queries, if not exists, will return nullptr
  *        we assume data will be ordered by query,
  *        the interval of [query_boundaries[i], query_boundaries[i+1])
  *        is the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }

  /*!
  * \brief Get weights for queries, if not exists, will return nullptr
  * \return Pointer of weights for queries
  */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get initial scores, if not exists, will return nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }

  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load ranks from file */
  void LoadRanks(); ///
  /*! \brief Load prices from file */
  void LoadPrices(); ///
  /*! \brief Load item scores from file */
  void LoadItemScores(); ///
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Number of ranks, used to check correct rank file */
  data_size_t num_ranks_; ///
  /*! \brief Number of item scores, used to check correct item score file */
  data_size_t num_item_scores_; ///
  /*! \brief Number of prices, used to check correct price file */
  data_size_t num_prices_; ///
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Ranks data */
  std::vector<size_t> ranks_; ///
  /*! \brief Prices data */
  std::vector<size_t> prices_; ///
  /*! \brief Item scores data */
  std::vector<size_t> item_scores_; ///
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct initial score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
  std::mutex mutex_;
  bool weight_load_from_file_;
  bool rank_load_from_file_; ///
  bool query_load_from_file_;
  bool init_score_load_from_file_;
  bool item_score_load_from_file_;
  bool price_load_from_file_;
};


/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}

  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, store in (column_idx, values)
  * \param out_label Label will store to this if exists
  */
  virtual void ParseOneLine(const char* str,
                            std::vector<std::pair<int, double>>* out_features,
                            double* out_label) const = 0;

  virtual int TotalColumns() const = 0;

  /*!
  * \brief Create an object of parser, will auto choose the format depending on the file
  * \param filename One filename of data
  * \param has_header Whether the file has a header line
  * \param num_features Pass num_features of this data file if you know, <=0 means don't know
  * \param label_idx Index of label column
  * \return Object of parser
  */
  static Parser* CreateParser(const char* filename, bool has_header, int num_features, int label_idx);
};

/*! \brief The main class of data set,
*          which is used for training or validation
*/
class Dataset {
 public:
  friend DatasetLoader;

  LIGHTGBM_EXPORT Dataset();

  LIGHTGBM_EXPORT Dataset(data_size_t num_data);

  void Construct(
    std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
    int** sample_non_zero_indices,
    const int* num_per_col,
    size_t total_sample_cnt,
    const IOConfig& io_config);

  /*!
  \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();

  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    // Two datasets align only if every feature's binning is identical.
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }

  // Push one dense row (one value per column) into the feature-group bins.
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      // feature_idx < 0 means this column is unused and is skipped.
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }

  // Push one sparse row ((column, value) pairs) into the feature-group bins.
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }

  // Push one value directly by (group, sub_feature) coordinates.
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }

  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }

  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }

  // Indices of columns that are actually used (mapped to a feature).
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }

  void ReSize(data_size_t num_data);

  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);

  LIGHTGBM_EXPORT void FinishLoad();

  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);

  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);

  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);

  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);

  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);

  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);

  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
                           const data_size_t* data_indices, data_size_t num_data,
                           int leaf_idx,
                           std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
                           const score_t* gradients, const score_t* hessians,
                           score_t* ordered_gradients, score_t* ordered_hessians,
                           bool is_constant_hessian,
                           HistogramBinEntry* histogram_data) const;

  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian,
                    data_size_t num_data,
                    HistogramBinEntry* data) const;

  inline data_size_t Split(int feature,
                           const uint32_t* threshold,
                           int num_threshold,
                           bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left,
                                         data_indices, num_data, lte_indices, gt_indices);
  }

  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    // The first sub-feature of a group reserves bin 0, hence the offset of 1.
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }

  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }

  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }

  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }

  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }

  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }

  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }

  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata& metadata() const { return metadata_; }

  /*! \brief Get number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}

  /*! \brief Get number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }

  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name: feature_names_){
      if (feature_name.find(' ') != std::string::npos){
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
    }
    if (spaceInFeatureName){
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  // Human-readable bin info per column; "none" for unused columns.
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }

  /*! \brief Get number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*!
\brief Number of total features*/ int num_total_features_; /*! \brief Number of total data*/ data_size_t num_data_; /*! \brief Store some label level data*/ Metadata metadata_; /*! \brief index of label column */ int label_idx_ = 0; /*! \brief Threshold for treating a feature as a sparse feature */ double sparse_threshold_; /*! \brief store feature names */ std::vector<std::string> feature_names_; /*! \brief store feature names */ static const char* binary_file_token; int num_groups_; std::vector<int> real_feature_idx_; std::vector<int> feature2group_; std::vector<int> feature2subfeature_; std::vector<uint64_t> group_bin_boundaries_; std::vector<int> group_feature_start_; std::vector<int> group_feature_cnt_; std::vector<int8_t> monotone_types_; bool is_finish_load_; }; } // namespace LightGBM #endif // LightGBM_DATA_H_
MorphologicalErosionImageFilter.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #ifndef BK_MORPHOLOGICALEROSIONIMAGEFILTER_H #define BK_MORPHOLOGICALEROSIONIMAGEFILTER_H #include <algorithm> #include <cassert> #include <initializer_list> #include <type_traits> #include <vector> #include <bkDataset/image/filter/KernelFactory.h> #include <bkDataset/image/filter/MorphologicalOperationImageFilter.h> #include <bkDataset/image/filter/DistanceMapImageFilter.h> #include <bkDataset/lib/bkDataset_export.h> #ifdef BK_EMIT_PROGRESS #include <bk/Progress> #include <bk/Localization> #endif namespace bk { class BKDATASET_EXPORT MorphologicalErosionImageFilter { //==================================================================================================== //===== DEFINITIONS //==================================================================================================== using self_type = MorphologicalErosionImageFilter; //==================================================================================================== //===== MEMBERS //==================================================================================================== std::vector<unsigned int> _kernel_size; //==================================================================================================== //===== CONSTRUCTORS & DESTRUCTOR //==================================================================================================== public: /// @{ -------------------------------------------------- CTOR MorphologicalErosionImageFilter(); MorphologicalErosionImageFilter(const self_type& other); MorphologicalErosionImageFilter(self_type&& other) noexcept; MorphologicalErosionImageFilter(unsigned int nDims, unsigned int size); /// @} /// @{ -------------------------------------------------- DTOR ~MorphologicalErosionImageFilter(); /// @} //==================================================================================================== //===== GETTER //==================================================================================================== 
/// @{ -------------------------------------------------- GET KERNEL SIZE [[nodiscard]] const std::vector<unsigned int>& kernel_size() const; /// @} //==================================================================================================== //===== SETTER //==================================================================================================== /// @{ -------------------------------------------------- OPERATOR = [[maybe_unused]] auto operator=(const self_type& other) -> self_type&; [[maybe_unused]] auto operator=(self_type&& other) noexcept -> self_type&; /// @} /// @{ -------------------------------------------------- SET KERNEL SIZE template<typename T> void set_kernel_size(std::initializer_list<T> ilist) { _kernel_size.assign(ilist); } template<typename Iter> void set_kernel_size(Iter first, Iter last) { _kernel_size.assign(first, last); } void set_kernel_size(unsigned int nDims, unsigned int size); /// @} //==================================================================================================== //===== FUNCTIONS //==================================================================================================== /// @{ -------------------------------------------------- APPLY template<typename TImage> [[nodiscard]] TImage apply(const TImage& img) const { assert(!_kernel_size.empty() && "call set_kernel_size() first"); const bool kernel_has_isotropic_size = std::all_of(_kernel_size.begin(), _kernel_size.end(), [&](unsigned int x) { return x == _kernel_size.front(); }); if (kernel_has_isotropic_size) { #ifdef BK_EMIT_PROGRESS bk::Progress& prog = bk_progress.emplace_task(3, ___("Morphological erosion filtering")); #endif TImage res; res.set_size(img.size()); const auto minVal = img.min_value(); #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif DistanceMapImageFilter f; f.set_value(minVal); auto distance_map = img.filter(f); #ifdef BK_EMIT_PROGRESS prog.increment(1); #endif const unsigned int halfKernelSize = _kernel_size.front() 
>> 1; #pragma omp parallel for for (unsigned int i = 0; i < img.num_values(); ++i) { res[i] = distance_map[i] <= halfKernelSize ? minVal : img[i]; } #ifdef BK_EMIT_PROGRESS prog.set_finished(); #endif return res; } else { return MorphologicalOperationImageFilter::apply(img, KernelFactory::make_erosion_morphological_of_sizes(_kernel_size)); } } /// @} }; // class MorphologicalErosionImageFilter } // namespace bk #endif //BK_MORPHOLOGICALEROSIONIMAGEFILTER_H
c7f75db0cd39a618fa39b010b313075df98415a3.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"

/* Flattened multi-dimensional array descriptor (Devito-generated ABI). */
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

/* Per-section wall-clock accumulators. */
struct profiler
{
  double section0;
} ;

/*
 * Accumulate the squared L2 norm of the receiver data `rec` over all time
 * steps, counting only receivers whose physical coordinates map to a grid
 * index inside [x_m,x_M] x [y_m,y_M] x [z_m,z_M].  The scalar result is
 * written to n[0]; elapsed wall time is added to timers->section0.
 * Returns 0 (generated code always reports success through this path).
 */
int norm2(const float h_x, const float h_y, const float h_z, struct dataobj *restrict n_vec, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int time_M, const int time_m, struct profiler * timers)
{
  float (*restrict n) __attribute__ ((aligned (64))) = (float (*)) n_vec->data;
  float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
  float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;

  /* Copy the receiver data and coordinates to the accelerator once,
     before the offloaded reduction loop. */
  #pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
  #pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])

  float sum = 0.0F;

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);

  /* Begin section0 */
  #pragma omp target teams distribute parallel for collapse(2) reduction(+:sum)
  for (int time = time_m; time <= time_M; time += 1)
  {
    for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
    {
      /* Map the receiver's physical coordinates to grid indices. */
      int ii_rec_0 = (int)(floor((-o_x + rec_coords[p_rec][0])/h_x));
      int ii_rec_1 = (int)(floor((-o_y + rec_coords[p_rec][1])/h_y));
      int ii_rec_2 = (int)(floor((-o_z + rec_coords[p_rec][2])/h_z));
      if (x_M >= ii_rec_0 && y_M >= ii_rec_1 && z_M >= ii_rec_2 && x_m <= ii_rec_0 && y_m <= ii_rec_1 && z_m <= ii_rec_2)
      {
        /* Fix: was fabs(pow(rec[time][p_rec], 2)).  A square is never
           negative, so the fabs was redundant, and pow() is a general
           transcendental call inside the hot offloaded loop.  A direct
           double multiply is exact and cheap; values match the old code
           up to libm's pow rounding. */
        const double r = rec[time][p_rec];
        sum += r * r;
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

  n[0] = sum;

  /* Release the device copies created by the enter-data maps above. */
  #pragma omp target exit data map(delete: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
  #pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])

  return 0;
}
convolution_3x3_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd43_transform_kernel_pack8to4_int8_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt) { // winograd43 transform kernel Mat kernel_tm(6 * 6, inch, outch, (size_t)2u); const short ktm[6][3] = { {6, 0, 0}, {-4, -4, -4}, {-4, 4, -4}, {1, 2, 4}, {1, -2, 4}, {0, 0, 6} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { short* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 4b-8a-inch/8a-36-outch/4b kernel_tm_pack8.create(inch / 8, 36, outch / 8 + 
(outch % 8) / 4, (size_t)2u * 64, 64); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat kernel_tm = kernel_tm_pack8.channel(q / 8); for (int k = 0; k < 36; k++) { short* g00 = kernel_tm.row<short>(k); for (int p = 0; p + 7 < inch; p += 8) { for (int i = 0; i < 8; i++) { const short* k00 = k0.row<const short>(p + i); const short* k10 = k1.row<const short>(p + i); const short* k20 = k2.row<const short>(p + i); const short* k30 = k3.row<const short>(p + i); const short* k40 = k4.row<const short>(p + i); const short* k50 = k5.row<const short>(p + i); const short* k60 = k6.row<const short>(p + i); const short* k70 = k7.row<const short>(p + i); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00 += 8; } } } } for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); Mat kernel_tm = kernel_tm_pack8.channel(q / 8 + (q % 8) / 4); for (int k = 0; k < 36; k++) { short* g00 = kernel_tm.row<short>(k); for (int p = 0; p + 7 < inch; p += 8) { for (int i = 0; i < 8; i++) { const short* k00 = k0.row<const short>(p + i); const short* k10 = k1.row<const short>(p + i); const short* k20 = k2.row<const short>(p + i); const short* k30 = k3.row<const short>(p + i); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00 += 4; } } } } } static void conv3x3s1_winograd43_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; 
// size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { int8x8_t _r00 = vld1_s8(r0); int8x8_t _r01 = vld1_s8(r0 + 8); int8x8_t _r02 = vld1_s8(r0 + 16); int8x8_t _r03 = vld1_s8(r0 + 24); int8x8_t _r04 = vld1_s8(r0 + 32); int8x8_t _r05 = vld1_s8(r0 + 40); int8x8_t _v4s8 = vdup_n_s8(4); int8x8_t _v5s8 = vdup_n_s8(5); int16x8_t _v2 = vdupq_n_s16(2); int16x8_t _v4 = vdupq_n_s16(4); // int16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f); int16x8_t _tmp0m = vsubq_s16(vaddw_s8(vmull_s8(_r00, _v4s8), _r04), vmull_s8(_r02, 
_v5s8)); // int16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f); int16x8_t _tmp1m = vmlsq_s16(vaddl_s8(_r04, _r03), vaddl_s8(_r01, _r02), _v4); // int16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f); int16x8_t _tmp2m = vmlaq_s16(vsubl_s8(_r04, _r03), vsubl_s8(_r01, _r02), _v4); // int16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); int16x8_t _tmp3m = vmlsq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2); // int16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); int16x8_t _tmp4m = vmlaq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2); // int16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f); int16x8_t _tmp5m = vsubq_s16(vaddw_s8(vmull_s8(_r01, _v4s8), _r05), vmull_s8(_r03, _v5s8)); vst1q_s16(tmp[0][m], _tmp0m); vst1q_s16(tmp[1][m], _tmp1m); vst1q_s16(tmp[2][m], _tmp2m); vst1q_s16(tmp[3][m], _tmp3m); vst1q_s16(tmp[4][m], _tmp4m); vst1q_s16(tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { int16x8_t _tmp00 = vld1q_s16(tmp[m][0]); int16x8_t _tmp01 = vld1q_s16(tmp[m][1]); int16x8_t _tmp02 = vld1q_s16(tmp[m][2]); int16x8_t _tmp03 = vld1q_s16(tmp[m][3]); int16x8_t _tmp04 = vld1q_s16(tmp[m][4]); int16x8_t _tmp05 = vld1q_s16(tmp[m][5]); int16x8_t _v2 = vdupq_n_s16(2); int16x8_t _v4 = vdupq_n_s16(4); int16x8_t _v5 = vdupq_n_s16(5); int16x8_t _r0tm0 = vmlsq_s16(vmlaq_s16(_tmp04, _tmp00, _v4), _tmp02, _v5); int16x8_t _r0tm1 = vmlsq_s16(vaddq_s16(_tmp04, _tmp03), vaddq_s16(_tmp01, _tmp02), _v4); int16x8_t _r0tm2 = vmlaq_s16(vsubq_s16(_tmp04, _tmp03), vsubq_s16(_tmp01, _tmp02), _v4); int16x8_t _r0tm3 = vmlsq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2); 
int16x8_t _r0tm4 = vmlaq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2); int16x8_t _r0tm5 = vmlsq_s16(vmlaq_s16(_tmp05, _tmp01, _v4), _tmp03, _v5); vst1q_s16(r0_tm_0, _r0tm0); vst1q_s16(r0_tm_1, _r0tm1); vst1q_s16(r0_tm_2, _r0tm2); vst1q_s16(r0_tm_3, _r0tm3); vst1q_s16(r0_tm_4, _r0tm4); vst1q_s16(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for 
num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { short* tm2p = tm2.row<short>(i / 12); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 12x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n" "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n" "sub %0, %0, #128 \n" "uzp1 v20.8h, v0.8h, v4.8h \n" // 0 "uzp1 v21.8h, v16.8h, v1.8h \n" // 1 "uzp1 v22.8h, v5.8h, v17.8h \n" // 2 "uzp1 v23.8h, v2.8h, v6.8h \n" // 3 "uzp1 v24.8h, v18.8h, v3.8h \n" // 4 "uzp1 v25.8h, v7.8h, v19.8h \n" // 5 "uzp2 v26.8h, v0.8h, v4.8h \n" // 6 "uzp2 v27.8h, v16.8h, v1.8h \n" // 7 "uzp2 v28.8h, v5.8h, v17.8h \n" // 8 "uzp2 v29.8h, v2.8h, v6.8h \n" // 9 "uzp2 v30.8h, v18.8h, v3.8h \n" // 10 "uzp2 v31.8h, v7.8h, v19.8h \n" // 11 "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h 
\n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } #endif // __aarch64__ for (; i + 3 < tiles; i += 4) { #if __aarch64__ short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else short* tmpptr = tm2.row<short>(i / 4); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3"); #endif r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.s16 {d0-d3}, [%0 :128] \n" "vst1.s16 {d0-d3}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "q0", "q1"); #endif r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { #if __aarch64__ short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.s16 {d0-d1}, [%0 :128] \n" "vst1.s16 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "q0"); #endif r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel0_tm = kernel_tm.channel(p / 2); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const short* r0 = bb2.row<const short>(i / 12); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r01 "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w01 "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "prfm pldl1keep, [%3, #256] \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "prfm pldl1keep, [%4, #256] \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b 
\n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "smlal v8.4s, v4.4h, v0.h[0] \n" "smlal2 v20.4s, v4.8h, v0.h[0] \n" "smlal v9.4s, v4.4h, v0.h[1] \n" "smlal2 v21.4s, v4.8h, v0.h[1] \n" "smlal v10.4s, v4.4h, v0.h[2] \n" "smlal2 v22.4s, v4.8h, v0.h[2] \n" "smlal v11.4s, v4.4h, v0.h[3] \n" "smlal2 v23.4s, v4.8h, v0.h[3] \n" "smlal v12.4s, v4.4h, v0.h[4] \n" "smlal2 v24.4s, v4.8h, v0.h[4] \n" "smlal v13.4s, v4.4h, v0.h[5] \n" "smlal2 v25.4s, v4.8h, v0.h[5] \n" "smlal v14.4s, v4.4h, v0.h[6] \n" "smlal2 v26.4s, v4.8h, v0.h[6] \n" "smlal v15.4s, v4.4h, v0.h[7] \n" "smlal2 v27.4s, v4.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r23 "smlal v16.4s, v4.4h, v1.h[0] \n" "smlal2 v28.4s, v4.8h, v1.h[0] \n" "smlal v17.4s, v4.4h, v1.h[1] \n" "smlal2 v29.4s, v4.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v18.4s, v4.4h, v1.h[2] \n" "smlal2 v30.4s, v4.8h, v1.h[2] \n" "smlal v19.4s, v4.4h, v1.h[3] \n" "smlal2 v31.4s, v4.8h, v1.h[3] \n" "ld1 {v6.8h, v7.8h}, [%4], #32 \n" // w23 "smlal v8.4s, v5.4h, v1.h[4] \n" "smlal2 v20.4s, v5.8h, v1.h[4] \n" "smlal v9.4s, v5.4h, v1.h[5] \n" "smlal2 v21.4s, v5.8h, v1.h[5] \n" "prfm pldl1keep, [%4, #256] \n" "smlal v10.4s, v5.4h, v1.h[6] \n" "smlal2 v22.4s, v5.8h, v1.h[6] \n" "smlal v11.4s, v5.4h, v1.h[7] \n" "smlal2 v23.4s, v5.8h, v1.h[7] \n" "smlal v12.4s, v5.4h, v2.h[0] \n" "smlal2 v24.4s, v5.8h, v2.h[0] \n" "smlal v13.4s, v5.4h, v2.h[1] \n" "smlal2 v25.4s, v5.8h, v2.h[1] \n" "smlal v14.4s, v5.4h, v2.h[2] \n" "smlal2 
v26.4s, v5.8h, v2.h[2] \n" "smlal v15.4s, v5.4h, v2.h[3] \n" "smlal2 v27.4s, v5.8h, v2.h[3] \n" "smlal v16.4s, v5.4h, v2.h[4] \n" "smlal2 v28.4s, v5.8h, v2.h[4] \n" "smlal v17.4s, v5.4h, v2.h[5] \n" "smlal2 v29.4s, v5.8h, v2.h[5] \n" "smlal v18.4s, v5.4h, v2.h[6] \n" "smlal2 v30.4s, v5.8h, v2.h[6] \n" "smlal v19.4s, v5.4h, v2.h[7] \n" "smlal2 v31.4s, v5.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r45 "smlal v8.4s, v6.4h, v3.h[0] \n" "smlal2 v20.4s, v6.8h, v3.h[0] \n" "smlal v9.4s, v6.4h, v3.h[1] \n" "smlal2 v21.4s, v6.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v10.4s, v6.4h, v3.h[2] \n" "smlal2 v22.4s, v6.8h, v3.h[2] \n" "smlal v11.4s, v6.4h, v3.h[3] \n" "smlal2 v23.4s, v6.8h, v3.h[3] \n" "smlal v12.4s, v6.4h, v3.h[4] \n" "smlal2 v24.4s, v6.8h, v3.h[4] \n" "smlal v13.4s, v6.4h, v3.h[5] \n" "smlal2 v25.4s, v6.8h, v3.h[5] \n" "smlal v14.4s, v6.4h, v3.h[6] \n" "smlal2 v26.4s, v6.8h, v3.h[6] \n" "smlal v15.4s, v6.4h, v3.h[7] \n" "smlal2 v27.4s, v6.8h, v3.h[7] \n" "smlal v16.4s, v6.4h, v0.h[0] \n" "smlal2 v28.4s, v6.8h, v0.h[0] \n" "smlal v17.4s, v6.4h, v0.h[1] \n" "smlal2 v29.4s, v6.8h, v0.h[1] \n" "smlal v18.4s, v6.4h, v0.h[2] \n" "smlal2 v30.4s, v6.8h, v0.h[2] \n" "smlal v19.4s, v6.4h, v0.h[3] \n" "smlal2 v31.4s, v6.8h, v0.h[3] \n" "ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w45 "smlal v8.4s, v7.4h, v0.h[4] \n" "smlal2 v20.4s, v7.8h, v0.h[4] \n" "smlal v9.4s, v7.4h, v0.h[5] \n" "smlal2 v21.4s, v7.8h, v0.h[5] \n" "prfm pldl1keep, [%4, #256] \n" "smlal v10.4s, v7.4h, v0.h[6] \n" "smlal2 v22.4s, v7.8h, v0.h[6] \n" "smlal v11.4s, v7.4h, v0.h[7] \n" "smlal2 v23.4s, v7.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r67 "smlal v12.4s, v7.4h, v1.h[0] \n" "smlal2 v24.4s, v7.8h, v1.h[0] \n" "smlal v13.4s, v7.4h, v1.h[1] \n" "smlal2 v25.4s, v7.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v14.4s, v7.4h, v1.h[2] \n" "smlal2 v26.4s, v7.8h, v1.h[2] \n" "smlal v15.4s, v7.4h, v1.h[3] \n" "smlal2 v27.4s, v7.8h, v1.h[3] \n" "smlal v16.4s, v7.4h, v1.h[4] 
\n" "smlal2 v28.4s, v7.8h, v1.h[4] \n" "smlal v17.4s, v7.4h, v1.h[5] \n" "smlal2 v29.4s, v7.8h, v1.h[5] \n" "smlal v18.4s, v7.4h, v1.h[6] \n" "smlal2 v30.4s, v7.8h, v1.h[6] \n" "smlal v19.4s, v7.4h, v1.h[7] \n" "smlal2 v31.4s, v7.8h, v1.h[7] \n" "smlal v8.4s, v4.4h, v2.h[0] \n" "smlal2 v20.4s, v4.8h, v2.h[0] \n" "smlal v9.4s, v4.4h, v2.h[1] \n" "smlal2 v21.4s, v4.8h, v2.h[1] \n" "smlal v10.4s, v4.4h, v2.h[2] \n" "smlal2 v22.4s, v4.8h, v2.h[2] \n" "smlal v11.4s, v4.4h, v2.h[3] \n" "smlal2 v23.4s, v4.8h, v2.h[3] \n" "smlal v12.4s, v4.4h, v2.h[4] \n" "smlal2 v24.4s, v4.8h, v2.h[4] \n" "smlal v13.4s, v4.4h, v2.h[5] \n" "smlal2 v25.4s, v4.8h, v2.h[5] \n" "smlal v14.4s, v4.4h, v2.h[6] \n" "smlal2 v26.4s, v4.8h, v2.h[6] \n" "smlal v15.4s, v4.4h, v2.h[7] \n" "smlal2 v27.4s, v4.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r89 "smlal v16.4s, v4.4h, v3.h[0] \n" "smlal2 v28.4s, v4.8h, v3.h[0] \n" "smlal v17.4s, v4.4h, v3.h[1] \n" "smlal2 v29.4s, v4.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v18.4s, v4.4h, v3.h[2] \n" "smlal2 v30.4s, v4.8h, v3.h[2] \n" "smlal v19.4s, v4.4h, v3.h[3] \n" "smlal2 v31.4s, v4.8h, v3.h[3] \n" "ld1 {v6.8h, v7.8h}, [%4], #32 \n" // w67 "smlal v8.4s, v5.4h, v3.h[4] \n" "smlal2 v20.4s, v5.8h, v3.h[4] \n" "smlal v9.4s, v5.4h, v3.h[5] \n" "smlal2 v21.4s, v5.8h, v3.h[5] \n" "prfm pldl1keep, [%4, #256] \n" "smlal v10.4s, v5.4h, v3.h[6] \n" "smlal2 v22.4s, v5.8h, v3.h[6] \n" "smlal v11.4s, v5.4h, v3.h[7] \n" "smlal2 v23.4s, v5.8h, v3.h[7] \n" "smlal v12.4s, v5.4h, v0.h[0] \n" "smlal2 v24.4s, v5.8h, v0.h[0] \n" "smlal v13.4s, v5.4h, v0.h[1] \n" "smlal2 v25.4s, v5.8h, v0.h[1] \n" "smlal v14.4s, v5.4h, v0.h[2] \n" "smlal2 v26.4s, v5.8h, v0.h[2] \n" "smlal v15.4s, v5.4h, v0.h[3] \n" "smlal2 v27.4s, v5.8h, v0.h[3] \n" "smlal v16.4s, v5.4h, v0.h[4] \n" "smlal2 v28.4s, v5.8h, v0.h[4] \n" "smlal v17.4s, v5.4h, v0.h[5] \n" "smlal2 v29.4s, v5.8h, v0.h[5] \n" "smlal v18.4s, v5.4h, v0.h[6] \n" "smlal2 v30.4s, v5.8h, v0.h[6] \n" "smlal v19.4s, v5.4h, 
v0.h[7] \n" "smlal2 v31.4s, v5.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r1011 "smlal v8.4s, v6.4h, v1.h[0] \n" "smlal2 v20.4s, v6.8h, v1.h[0] \n" "smlal v9.4s, v6.4h, v1.h[1] \n" "smlal2 v21.4s, v6.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v10.4s, v6.4h, v1.h[2] \n" "smlal2 v22.4s, v6.8h, v1.h[2] \n" "smlal v11.4s, v6.4h, v1.h[3] \n" "smlal2 v23.4s, v6.8h, v1.h[3] \n" "smlal v12.4s, v6.4h, v1.h[4] \n" "smlal2 v24.4s, v6.8h, v1.h[4] \n" "smlal v13.4s, v6.4h, v1.h[5] \n" "smlal2 v25.4s, v6.8h, v1.h[5] \n" "smlal v14.4s, v6.4h, v1.h[6] \n" "smlal2 v26.4s, v6.8h, v1.h[6] \n" "smlal v15.4s, v6.4h, v1.h[7] \n" "smlal2 v27.4s, v6.8h, v1.h[7] \n" "smlal v16.4s, v6.4h, v2.h[0] \n" "smlal2 v28.4s, v6.8h, v2.h[0] \n" "smlal v17.4s, v6.4h, v2.h[1] \n" "smlal2 v29.4s, v6.8h, v2.h[1] \n" "smlal v18.4s, v6.4h, v2.h[2] \n" "smlal2 v30.4s, v6.8h, v2.h[2] \n" "smlal v19.4s, v6.4h, v2.h[3] \n" "smlal2 v31.4s, v6.8h, v2.h[3] \n" "ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w01 "smlal v8.4s, v7.4h, v2.h[4] \n" "smlal2 v20.4s, v7.8h, v2.h[4] \n" "smlal v9.4s, v7.4h, v2.h[5] \n" "smlal2 v21.4s, v7.8h, v2.h[5] \n" "prfm pldl1keep, [%4, #256] \n" "smlal v10.4s, v7.4h, v2.h[6] \n" "smlal2 v22.4s, v7.8h, v2.h[6] \n" "smlal v11.4s, v7.4h, v2.h[7] \n" "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r01 "smlal v12.4s, v7.4h, v3.h[0] \n" "smlal2 v24.4s, v7.8h, v3.h[0] \n" "smlal v13.4s, v7.4h, v3.h[1] \n" "smlal2 v25.4s, v7.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v14.4s, v7.4h, v3.h[2] \n" "smlal2 v26.4s, v7.8h, v3.h[2] \n" "smlal v15.4s, v7.4h, v3.h[3] \n" "smlal2 v27.4s, v7.8h, v3.h[3] \n" "smlal v16.4s, v7.4h, v3.h[4] \n" "smlal2 v28.4s, v7.8h, v3.h[4] \n" "smlal v17.4s, v7.4h, v3.h[5] \n" "smlal2 v29.4s, v7.8h, v3.h[5] \n" "subs %w0, %w0, #1 \n" "smlal v18.4s, v7.4h, v3.h[6] \n" "smlal2 v30.4s, v7.8h, v3.h[6] \n" "smlal v19.4s, v7.4h, v3.h[7] \n" "smlal2 v31.4s, v7.8h, v3.h[7] \n" "bne 0b \n" "sub %3, %3, #32 \n" "sub %4, %4, #32 \n" 
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k0) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); int32x4_t _sum8 = vdupq_n_s32(0); int32x4_t _sum9 = vdupq_n_s32(0); int32x4_t _suma = vdupq_n_s32(0); int32x4_t _sumb = vdupq_n_s32(0); int32x4_t _sumc = vdupq_n_s32(0); int32x4_t _sumd = vdupq_n_s32(0); int32x4_t _sume = vdupq_n_s32(0); int32x4_t _sumf = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _val4 = vld1q_s16(r0 + 32); int16x8_t _val5 = vld1q_s16(r0 + 40); int16x8_t _val6 = vld1q_s16(r0 + 48); int16x8_t _val7 = vld1q_s16(r0 + 56); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), 
vget_low_s16(_val0), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val0), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val0), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val0), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val0), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val0), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w0), vget_high_s16(_val0), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w0), vget_high_s16(_val0), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w0), vget_high_s16(_val0), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w0), vget_high_s16(_val0), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w0), vget_high_s16(_val0), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w0), vget_high_s16(_val0), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w0), vget_high_s16(_val0), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w0), vget_high_s16(_val0), 3); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val1), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val1), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val1), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val1), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val1), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val1), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w1), vget_high_s16(_val1), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w1), vget_high_s16(_val1), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w1), vget_high_s16(_val1), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w1), vget_high_s16(_val1), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w1), 
vget_high_s16(_val1), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w1), vget_high_s16(_val1), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w1), vget_high_s16(_val1), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w1), vget_high_s16(_val1), 3); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val2), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val2), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val2), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val2), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_low_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_low_s16(_val2), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_low_s16(_val2), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_low_s16(_val2), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w2), vget_high_s16(_val2), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w2), vget_high_s16(_val2), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w2), vget_high_s16(_val2), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w2), vget_high_s16(_val2), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w2), vget_high_s16(_val2), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w2), vget_high_s16(_val2), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w2), vget_high_s16(_val2), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w2), vget_high_s16(_val2), 3); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val3), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val3), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val3), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val3), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_low_s16(_val3), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_low_s16(_val3), 2); _sum6 = 
vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_low_s16(_val3), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_low_s16(_val3), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w3), vget_high_s16(_val3), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w3), vget_high_s16(_val3), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w3), vget_high_s16(_val3), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w3), vget_high_s16(_val3), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w3), vget_high_s16(_val3), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w3), vget_high_s16(_val3), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w3), vget_high_s16(_val3), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w3), vget_high_s16(_val3), 3); int16x8_t _w4 = vld1q_s16(k0 + 32); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_low_s16(_val4), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_low_s16(_val4), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_low_s16(_val4), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_low_s16(_val4), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w4), vget_low_s16(_val4), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w4), vget_low_s16(_val4), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w4), vget_low_s16(_val4), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w4), vget_low_s16(_val4), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w4), vget_high_s16(_val4), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w4), vget_high_s16(_val4), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w4), vget_high_s16(_val4), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w4), vget_high_s16(_val4), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w4), vget_high_s16(_val4), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w4), vget_high_s16(_val4), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w4), vget_high_s16(_val4), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w4), vget_high_s16(_val4), 3); int16x8_t _w5 = 
vld1q_s16(k0 + 40); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_low_s16(_val5), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_low_s16(_val5), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_low_s16(_val5), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_low_s16(_val5), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w5), vget_low_s16(_val5), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w5), vget_low_s16(_val5), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w5), vget_low_s16(_val5), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w5), vget_low_s16(_val5), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w5), vget_high_s16(_val5), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w5), vget_high_s16(_val5), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w5), vget_high_s16(_val5), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w5), vget_high_s16(_val5), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w5), vget_high_s16(_val5), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w5), vget_high_s16(_val5), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w5), vget_high_s16(_val5), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w5), vget_high_s16(_val5), 3); int16x8_t _w6 = vld1q_s16(k0 + 48); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_low_s16(_val6), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_low_s16(_val6), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_low_s16(_val6), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_low_s16(_val6), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w6), vget_low_s16(_val6), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w6), vget_low_s16(_val6), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w6), vget_low_s16(_val6), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w6), vget_low_s16(_val6), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w6), vget_high_s16(_val6), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w6), vget_high_s16(_val6), 
0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w6), vget_high_s16(_val6), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w6), vget_high_s16(_val6), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w6), vget_high_s16(_val6), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w6), vget_high_s16(_val6), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w6), vget_high_s16(_val6), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w6), vget_high_s16(_val6), 3); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_low_s16(_val7), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_low_s16(_val7), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_low_s16(_val7), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_low_s16(_val7), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w7), vget_low_s16(_val7), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w7), vget_low_s16(_val7), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w7), vget_low_s16(_val7), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w7), vget_low_s16(_val7), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w7), vget_high_s16(_val7), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w7), vget_high_s16(_val7), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w7), vget_high_s16(_val7), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w7), vget_high_s16(_val7), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w7), vget_high_s16(_val7), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w7), vget_high_s16(_val7), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w7), vget_high_s16(_val7), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w7), vget_high_s16(_val7), 3); r0 += 64; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); vst1q_s32(output0_tm + 4, _sum2); vst1q_s32(output1_tm + 4, _sum3); vst1q_s32(output0_tm + 8, _sum4); vst1q_s32(output1_tm + 8, _sum5); vst1q_s32(output0_tm + 12, _sum6); vst1q_s32(output1_tm + 12, _sum7); 
vst1q_s32(output0_tm + 16, _sum8); vst1q_s32(output1_tm + 16, _sum9); vst1q_s32(output0_tm + 20, _suma); vst1q_s32(output1_tm + 20, _sumb); vst1q_s32(output0_tm + 24, _sumc); vst1q_s32(output1_tm + 24, _sumd); vst1q_s32(output0_tm + 28, _sume); vst1q_s32(output1_tm + 28, _sumf); output0_tm += 32; output1_tm += 32; } #endif // __aarch64__ for (; i + 3 < tiles; i += 4) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const short* r0 = bb2.row<const short>(i / 4); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __aarch64__ int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 0); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val2), 0); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val3), 0); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val3), 0); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, 
vget_high_s16(_w1), vget_low_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val2), 1); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val3), 1); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val3), 1); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val1), 2); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_low_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_low_s16(_val2), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_low_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_low_s16(_val3), 2); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val1), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_low_s16(_val2), 3); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_low_s16(_val2), 3); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_low_s16(_val3), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_low_s16(_val3), 3); int16x8_t _w4 = vld1q_s16(k0 + 32); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_high_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_high_s16(_val1), 0); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w4), 
vget_high_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w4), vget_high_s16(_val2), 0); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w4), vget_high_s16(_val3), 0); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w4), vget_high_s16(_val3), 0); int16x8_t _w5 = vld1q_s16(k0 + 40); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_high_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_high_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w5), vget_high_s16(_val2), 1); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w5), vget_high_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w5), vget_high_s16(_val3), 1); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w5), vget_high_s16(_val3), 1); int16x8_t _w6 = vld1q_s16(k0 + 48); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_high_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_high_s16(_val1), 2); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w6), vget_high_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w6), vget_high_s16(_val2), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w6), vget_high_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w6), vget_high_s16(_val3), 2); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_high_s16(_val1), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_high_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w7), vget_high_s16(_val2), 3); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w7), 
vget_high_s16(_val2), 3); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w7), vget_high_s16(_val3), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w7), vget_high_s16(_val3), 3); r0 += 32; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); vst1q_s32(output0_tm + 4, _sum2); vst1q_s32(output1_tm + 4, _sum3); vst1q_s32(output0_tm + 8, _sum4); vst1q_s32(output1_tm + 8, _sum5); vst1q_s32(output0_tm + 12, _sum6); vst1q_s32(output1_tm + 12, _sum7); output0_tm += 16; output1_tm += 16; #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%3, #256] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n" "pld [%4, #256] \n" "vld1.s16 {d8-d11}, [%4 :128]! \n" "vmlal.s16 q8, d8, d0[0] \n" "vmlal.s16 q12, d9, d0[0] \n" "vmlal.s16 q9, d8, d2[0] \n" "vmlal.s16 q13, d9, d2[0] \n" "vmlal.s16 q10, d8, d4[0] \n" "vmlal.s16 q14, d9, d4[0] \n" "vmlal.s16 q11, d8, d6[0] \n" "vmlal.s16 q15, d9, d6[0] \n" "pld [%4, #128] \n" "vld1.s16 {d8-d9}, [%4 :128]! \n" "vmlal.s16 q8, d10, d0[1] \n" "vmlal.s16 q12, d11, d0[1] \n" "vmlal.s16 q9, d10, d2[1] \n" "vmlal.s16 q13, d11, d2[1] \n" "vmlal.s16 q10, d10, d4[1] \n" "vmlal.s16 q14, d11, d4[1] \n" "vmlal.s16 q11, d10, d6[1] \n" "vmlal.s16 q15, d11, d6[1] \n" "pld [%4, #128] \n" "vld1.s16 {d10-d11}, [%4 :128]! \n" "vmlal.s16 q8, d8, d0[2] \n" "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q9, d8, d2[2] \n" "vmlal.s16 q13, d9, d2[2] \n" "vmlal.s16 q10, d8, d4[2] \n" "vmlal.s16 q14, d9, d4[2] \n" "vmlal.s16 q11, d8, d6[2] \n" "vmlal.s16 q15, d9, d6[2] \n" "pld [%4, #128] \n" "vld1.s16 {d8-d9}, [%4 :128]! \n" "vmlal.s16 q8, d10, d0[3] \n" "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q9, d10, d2[3] \n" "vmlal.s16 q13, d11, d2[3] \n" "vmlal.s16 q10, d10, d4[3] \n" "vmlal.s16 q14, d11, d4[3] \n" "vmlal.s16 q11, d10, d6[3] \n" "vmlal.s16 q15, d11, d6[3] \n" "pld [%4, #128] \n" "vld1.s16 {d10-d11}, [%4 :128]! 
\n" "vmlal.s16 q8, d8, d1[0] \n" "vmlal.s16 q12, d9, d1[0] \n" "vmlal.s16 q9, d8, d3[0] \n" "vmlal.s16 q13, d9, d3[0] \n" "vmlal.s16 q10, d8, d5[0] \n" "vmlal.s16 q14, d9, d5[0] \n" "vmlal.s16 q11, d8, d7[0] \n" "vmlal.s16 q15, d9, d7[0] \n" "pld [%4, #128] \n" "vld1.s16 {d8-d9}, [%4 :128]! \n" "vmlal.s16 q8, d10, d1[1] \n" "vmlal.s16 q12, d11, d1[1] \n" "vmlal.s16 q9, d10, d3[1] \n" "vmlal.s16 q13, d11, d3[1] \n" "vmlal.s16 q10, d10, d5[1] \n" "vmlal.s16 q14, d11, d5[1] \n" "vmlal.s16 q11, d10, d7[1] \n" "vmlal.s16 q15, d11, d7[1] \n" "pld [%4, #128] \n" "vld1.s16 {d10-d11}, [%4 :128]! \n" "vmlal.s16 q8, d8, d1[2] \n" "vmlal.s16 q12, d9, d1[2] \n" "vmlal.s16 q9, d8, d3[2] \n" "vmlal.s16 q13, d9, d3[2] \n" "vmlal.s16 q10, d8, d5[2] \n" "vmlal.s16 q14, d9, d5[2] \n" "vmlal.s16 q11, d8, d7[2] \n" "vmlal.s16 q15, d9, d7[2] \n" "subs %0, %0, #1 \n" "vmlal.s16 q8, d10, d1[3] \n" "vmlal.s16 q12, d11, d1[3] \n" "vmlal.s16 q9, d10, d3[3] \n" "vmlal.s16 q13, d11, d3[3] \n" "vmlal.s16 q10, d10, d5[3] \n" "vmlal.s16 q14, d11, d5[3] \n" "vmlal.s16 q11, d10, d7[3] \n" "vmlal.s16 q15, d11, d7[3] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %2!, {d24-d31} \n" : "=r"(nn), "=r"(output0_tm), "=r"(output1_tm), "=r"(r0), "=r"(k0) : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _w0 = vld1q_s16(k0); 
int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); int16x8_t _w4 = vld1q_s16(k0 + 32); int16x8_t _w5 = vld1q_s16(k0 + 40); int16x8_t _w6 = vld1q_s16(k0 + 48); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val1), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val1), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val1), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_high_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_high_s16(_val1), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_high_s16(_val1), 1); _sum3 = 
vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_high_s16(_val1), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_high_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_high_s16(_val1), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_high_s16(_val1), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_high_s16(_val1), 3); r0 += 16; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); vst1q_s32(output0_tm + 4, _sum2); vst1q_s32(output1_tm + 4, _sum3); output0_tm += 8; output1_tm += 8; } for (; i < tiles; i++) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); int16x8_t _w4 = vld1q_s16(k0 + 32); int16x8_t _w5 = vld1q_s16(k0 + 40); int16x8_t _w6 = vld1q_s16(k0 + 48); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, 
vget_high_s16(_w2), vget_low_s16(_val0), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3); r0 += 8; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); output0_tm += 4; output1_tm += 4; } } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const short* r0 = bb2.row<const short>(i / 12); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "ld1 {v4.8h, v5.8h}, [%3], #32 \n" // w01 "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "prfm pldl1keep, [%2, #256] \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "prfm pldl1keep, [%3, #256] \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b 
\n" "0: \n" "smlal v8.4s, v4.4h, v0.h[0] \n" "smlal v9.4s, v4.4h, v0.h[1] \n" "smlal v10.4s, v4.4h, v0.h[2] \n" "smlal v11.4s, v4.4h, v0.h[3] \n" "smlal v12.4s, v4.4h, v0.h[4] \n" "smlal v13.4s, v4.4h, v0.h[5] \n" "smlal v14.4s, v4.4h, v0.h[6] \n" "smlal v15.4s, v4.4h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r23 "smlal v16.4s, v4.4h, v1.h[0] \n" "smlal v17.4s, v4.4h, v1.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal v18.4s, v4.4h, v1.h[2] \n" "smlal v19.4s, v4.4h, v1.h[3] \n" "smlal2 v8.4s, v4.8h, v1.h[4] \n" "smlal2 v9.4s, v4.8h, v1.h[5] \n" "smlal2 v10.4s, v4.8h, v1.h[6] \n" "smlal2 v11.4s, v4.8h, v1.h[7] \n" "smlal2 v12.4s, v4.8h, v2.h[0] \n" "smlal2 v13.4s, v4.8h, v2.h[1] \n" "smlal2 v14.4s, v4.8h, v2.h[2] \n" "smlal2 v15.4s, v4.8h, v2.h[3] \n" "smlal2 v16.4s, v4.8h, v2.h[4] \n" "smlal2 v17.4s, v4.8h, v2.h[5] \n" "smlal2 v18.4s, v4.8h, v2.h[6] \n" "smlal2 v19.4s, v4.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r45 "smlal v8.4s, v5.4h, v3.h[0] \n" "smlal v9.4s, v5.4h, v3.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal v10.4s, v5.4h, v3.h[2] \n" "smlal v11.4s, v5.4h, v3.h[3] \n" "smlal v12.4s, v5.4h, v3.h[4] \n" "smlal v13.4s, v5.4h, v3.h[5] \n" "smlal v14.4s, v5.4h, v3.h[6] \n" "smlal v15.4s, v5.4h, v3.h[7] \n" "smlal v16.4s, v5.4h, v0.h[0] \n" "smlal v17.4s, v5.4h, v0.h[1] \n" "smlal v18.4s, v5.4h, v0.h[2] \n" "smlal v19.4s, v5.4h, v0.h[3] \n" "ld1 {v6.8h, v7.8h}, [%3], #32 \n" // w23 "smlal2 v8.4s, v5.8h, v0.h[4] \n" "smlal2 v9.4s, v5.8h, v0.h[5] \n" "prfm pldl1keep, [%3, #256] \n" "smlal2 v10.4s, v5.8h, v0.h[6] \n" "smlal2 v11.4s, v5.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r67 "smlal2 v12.4s, v5.8h, v1.h[0] \n" "smlal2 v13.4s, v5.8h, v1.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal2 v14.4s, v5.8h, v1.h[2] \n" "smlal2 v15.4s, v5.8h, v1.h[3] \n" "smlal2 v16.4s, v5.8h, v1.h[4] \n" "smlal2 v17.4s, v5.8h, v1.h[5] \n" "smlal2 v18.4s, v5.8h, v1.h[6] \n" "smlal2 v19.4s, v5.8h, v1.h[7] \n" "smlal v8.4s, v6.4h, v2.h[0] \n" "smlal 
v9.4s, v6.4h, v2.h[1] \n" "smlal v10.4s, v6.4h, v2.h[2] \n" "smlal v11.4s, v6.4h, v2.h[3] \n" "smlal v12.4s, v6.4h, v2.h[4] \n" "smlal v13.4s, v6.4h, v2.h[5] \n" "smlal v14.4s, v6.4h, v2.h[6] \n" "smlal v15.4s, v6.4h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r89 "smlal v16.4s, v6.4h, v3.h[0] \n" "smlal v17.4s, v6.4h, v3.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal v18.4s, v6.4h, v3.h[2] \n" "smlal v19.4s, v6.4h, v3.h[3] \n" "smlal2 v8.4s, v6.8h, v3.h[4] \n" "smlal2 v9.4s, v6.8h, v3.h[5] \n" "smlal2 v10.4s, v6.8h, v3.h[6] \n" "smlal2 v11.4s, v6.8h, v3.h[7] \n" "smlal2 v12.4s, v6.8h, v0.h[0] \n" "smlal2 v13.4s, v6.8h, v0.h[1] \n" "smlal2 v14.4s, v6.8h, v0.h[2] \n" "smlal2 v15.4s, v6.8h, v0.h[3] \n" "smlal2 v16.4s, v6.8h, v0.h[4] \n" "smlal2 v17.4s, v6.8h, v0.h[5] \n" "smlal2 v18.4s, v6.8h, v0.h[6] \n" "smlal2 v19.4s, v6.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r1011 "smlal v8.4s, v7.4h, v1.h[0] \n" "smlal v9.4s, v7.4h, v1.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal v10.4s, v7.4h, v1.h[2] \n" "smlal v11.4s, v7.4h, v1.h[3] \n" "smlal v12.4s, v7.4h, v1.h[4] \n" "smlal v13.4s, v7.4h, v1.h[5] \n" "smlal v14.4s, v7.4h, v1.h[6] \n" "smlal v15.4s, v7.4h, v1.h[7] \n" "smlal v16.4s, v7.4h, v2.h[0] \n" "smlal v17.4s, v7.4h, v2.h[1] \n" "smlal v18.4s, v7.4h, v2.h[2] \n" "smlal v19.4s, v7.4h, v2.h[3] \n" "ld1 {v4.8h, v5.8h}, [%3], #32 \n" // w01 "smlal2 v8.4s, v7.8h, v2.h[4] \n" "smlal2 v9.4s, v7.8h, v2.h[5] \n" "prfm pldl1keep, [%3, #256] \n" "smlal2 v10.4s, v7.8h, v2.h[6] \n" "smlal2 v11.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "smlal2 v12.4s, v7.8h, v3.h[0] \n" "smlal2 v13.4s, v7.8h, v3.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal2 v14.4s, v7.8h, v3.h[2] \n" "smlal2 v15.4s, v7.8h, v3.h[3] \n" "smlal2 v16.4s, v7.8h, v3.h[4] \n" "smlal2 v17.4s, v7.8h, v3.h[5] \n" "subs %w0, %w0, #1 \n" "smlal2 v18.4s, v7.8h, v3.h[6] \n" "smlal2 v19.4s, v7.8h, v3.h[7] \n" "bne 0b \n" "sub %2, %2, #32 \n" "sub %3, %3, #32 \n" "st1 {v8.4s, 
v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i + 7 < tiles; i += 8) { const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _val4 = vld1q_s16(r0 + 32); int16x8_t _val5 = vld1q_s16(r0 + 40); int16x8_t _val6 = vld1q_s16(r0 + 48); int16x8_t _val7 = vld1q_s16(r0 + 56); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w0), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val0), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w0), vget_low_s16(_val0), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_high_s16(_val0), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w0), vget_high_s16(_val0), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_high_s16(_val0), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w0), vget_high_s16(_val0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w0), vget_low_s16(_val1), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val1), 1); _sum2 = vmlal_lane_s16(_sum2, 
vget_high_s16(_w0), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w0), vget_high_s16(_val1), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_high_s16(_val1), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w0), vget_high_s16(_val1), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_high_s16(_val1), 3); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val2), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w1), vget_low_s16(_val2), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val2), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w1), vget_low_s16(_val2), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_high_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w1), vget_high_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_high_s16(_val2), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w1), vget_high_s16(_val2), 3); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w1), vget_low_s16(_val3), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val3), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w1), vget_low_s16(_val3), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val3), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w1), vget_high_s16(_val3), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_high_s16(_val3), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w1), vget_high_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_high_s16(_val3), 3); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val4), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w2), vget_low_s16(_val4), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val4), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w2), vget_low_s16(_val4), 3); _sum4 = 
vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_high_s16(_val4), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w2), vget_high_s16(_val4), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_high_s16(_val4), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w2), vget_high_s16(_val4), 3); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w2), vget_low_s16(_val5), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val5), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w2), vget_low_s16(_val5), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val5), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w2), vget_high_s16(_val5), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_high_s16(_val5), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w2), vget_high_s16(_val5), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_high_s16(_val5), 3); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val6), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w3), vget_low_s16(_val6), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val6), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w3), vget_low_s16(_val6), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_high_s16(_val6), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w3), vget_high_s16(_val6), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_high_s16(_val6), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w3), vget_high_s16(_val6), 3); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w3), vget_low_s16(_val7), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val7), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w3), vget_low_s16(_val7), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val7), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w3), vget_high_s16(_val7), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_high_s16(_val7), 1); _sum6 = 
vmlal_lane_s16(_sum6, vget_high_s16(_w3), vget_high_s16(_val7), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_high_s16(_val7), 3); r0 += 64; k0 += 32; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output0_tm + 4, _sum1); vst1q_s32(output0_tm + 8, _sum2); vst1q_s32(output0_tm + 12, _sum3); vst1q_s32(output0_tm + 16, _sum4); vst1q_s32(output0_tm + 20, _sum5); vst1q_s32(output0_tm + 24, _sum6); vst1q_s32(output0_tm + 28, _sum7); output0_tm += 32; } #endif // __aarch64__ for (; i + 3 < tiles; i += 4) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const short* r0 = bb2.row<const short>(i / 4); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __aarch64__ int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val3), 0); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val3), 1); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, 
vget_high_s16(_w1), vget_low_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val2), 3); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val3), 3); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_high_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_high_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_high_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_high_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_high_s16(_val3), 0); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_high_s16(_val3), 1); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_high_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_high_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_high_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_high_s16(_val2), 3); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_high_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_high_s16(_val3), 3); r0 += 32; k0 += 32; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); _sum4 = vaddq_s32(_sum4, _sum5); _sum6 = vaddq_s32(_sum6, _sum7); vst1q_s32(output0_tm, _sum0); vst1q_s32(output0_tm + 4, _sum2); 
vst1q_s32(output0_tm + 8, _sum4); vst1q_s32(output0_tm + 12, _sum6); output0_tm += 16; #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #256] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #256] \n" "vld1.s16 {d8-d11}, [%3 :128]! \n" "vmlal.s16 q8, d8, d0[0] \n" "vmlal.s16 q12, d9, d0[1] \n" "vmlal.s16 q9, d8, d2[0] \n" "vmlal.s16 q13, d9, d2[1] \n" "vmlal.s16 q10, d8, d4[0] \n" "vmlal.s16 q14, d9, d4[1] \n" "vmlal.s16 q11, d8, d6[0] \n" "vmlal.s16 q15, d9, d6[1] \n" "pld [%3, #128] \n" "vld1.s16 {d8-d9}, [%3 :128]! \n" "vmlal.s16 q8, d10, d0[2] \n" "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q9, d10, d2[2] \n" "vmlal.s16 q13, d11, d2[3] \n" "vmlal.s16 q10, d10, d4[2] \n" "vmlal.s16 q14, d11, d4[3] \n" "vmlal.s16 q11, d10, d6[2] \n" "vmlal.s16 q15, d11, d6[3] \n" "pld [%3, #128] \n" "vld1.s16 {d10-d11}, [%3 :128]! \n" "vmlal.s16 q8, d8, d1[0] \n" "vmlal.s16 q12, d9, d1[1] \n" "vmlal.s16 q9, d8, d3[0] \n" "vmlal.s16 q13, d9, d3[1] \n" "vmlal.s16 q10, d8, d5[0] \n" "vmlal.s16 q14, d9, d5[1] \n" "vmlal.s16 q11, d8, d7[0] \n" "vmlal.s16 q15, d9, d7[1] \n" "subs %0, %0, #1 \n" "vmlal.s16 q8, d10, d1[2] \n" "vmlal.s16 q12, d11, d1[3] \n" "vmlal.s16 q9, d10, d3[2] \n" "vmlal.s16 q13, d11, d3[3] \n" "vmlal.s16 q10, d10, d5[2] \n" "vmlal.s16 q14, d11, d5[3] \n" "vmlal.s16 q11, d10, d7[2] \n" "vmlal.s16 q15, d11, d7[3] \n" "bne 0b \n" "vadd.s32 q8, q8, q12 \n" "vadd.s32 q9, q9, q13 \n" "vadd.s32 q10, q10, q14 \n" "vadd.s32 q11, q11, q15 \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), "=r"(output0_tm), "=r"(r0), "=r"(k0) : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); 
#else const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_high_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_high_s16(_val1), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_high_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_high_s16(_val1), 3); r0 += 16; k0 += 32; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); vst1q_s32(output0_tm, _sum0); vst1q_s32(output0_tm + 4, _sum2); output0_tm += 8; } for (; i < tiles; i++) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 
12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3); r0 += 8; k0 += 32; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_s32(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u * 4, 4, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { 
const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6][4]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 4; const int* output0_tm_1 = output0_tm_0 + tiles * 4; const int* output0_tm_2 = output0_tm_0 + tiles * 8; const int* output0_tm_3 = output0_tm_0 + tiles * 12; const int* output0_tm_4 = output0_tm_0 + tiles * 16; const int* output0_tm_5 = output0_tm_0 + tiles * 20; int* output0 = out0.row<int>(i * 4) + (j * 4) * 4; // TODO neon optimize for (int m = 0; m < 5; m++) { int32x4_t _out0tm0 = vld1q_s32(output0_tm_0); int32x4_t _out0tm1 = vld1q_s32(output0_tm_1); int32x4_t _out0tm2 = vld1q_s32(output0_tm_2); int32x4_t _out0tm3 = vld1q_s32(output0_tm_3); int32x4_t _out0tm4 = vld1q_s32(output0_tm_4); int32x4_t _out0tm5 = vld1q_s32(output0_tm_5); int32x4_t _tmp02a = vaddq_s32(_out0tm1, _out0tm2); int32x4_t _tmp13a = vsubq_s32(_out0tm1, _out0tm2); int32x4_t _tmp02b = vaddq_s32(_out0tm3, _out0tm4); int32x4_t _tmp13b = vsubq_s32(_out0tm3, _out0tm4); int32x4_t _v2 = vdupq_n_s32(2); int32x4_t _v4 = vdupq_n_s32(4); int32x4_t _v8 = vdupq_n_s32(8); int32x4_t _tmp0m = vaddq_s32(vaddq_s32(_out0tm0, _tmp02a), _tmp02b); int32x4_t _tmp1m = vmlaq_s32(_tmp13a, _tmp13b, _v2); int32x4_t _tmp2m = vmlaq_s32(_tmp02a, _tmp02b, _v4); int32x4_t _tmp3m = vmlaq_s32(vmlaq_s32(_tmp13a, _out0tm5, _v4), _tmp13b, _v8); vst1q_s32(tmp[0][m], _tmp0m); vst1q_s32(tmp[1][m], _tmp1m); vst1q_s32(tmp[2][m], _tmp2m); vst1q_s32(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 5; m < 6; m++) { int32x4_t _out0tm0 = vld1q_s32(output0_tm_0); int32x4_t _out0tm1 = vld1q_s32(output0_tm_1); int32x4_t _out0tm2 = vld1q_s32(output0_tm_2); int32x4_t _out0tm3 = 
vld1q_s32(output0_tm_3); int32x4_t _out0tm4 = vld1q_s32(output0_tm_4); int32x4_t _out0tm5 = vld1q_s32(output0_tm_5); int32x4_t _tmp02a = vaddq_s32(_out0tm1, _out0tm2); int32x4_t _tmp13a = vsubq_s32(_out0tm1, _out0tm2); int32x4_t _tmp02b = vaddq_s32(_out0tm3, _out0tm4); int32x4_t _tmp13b = vsubq_s32(_out0tm3, _out0tm4); int32x4_t _v2 = vdupq_n_s32(2); int32x4_t _v4 = vdupq_n_s32(4); int32x4_t _v8 = vdupq_n_s32(8); int32x4_t _tmp0m = vaddq_s32(vaddq_s32(_out0tm0, _tmp02a), _tmp02b); int32x4_t _tmp1m = vmlaq_s32(_tmp13a, _tmp13b, _v2); int32x4_t _tmp2m = vmlaq_s32(_tmp02a, _tmp02b, _v4); int32x4_t _tmp3m = vmlaq_s32(vmlaq_s32(_tmp13a, _out0tm5, _v4), _tmp13b, _v8); _tmp0m = vmulq_s32(_tmp0m, _v4); _tmp1m = vmulq_s32(_tmp1m, _v4); _tmp2m = vmulq_s32(_tmp2m, _v4); _tmp3m = vmulq_s32(_tmp3m, _v4); vst1q_s32(tmp[0][m], _tmp0m); vst1q_s32(tmp[1][m], _tmp1m); vst1q_s32(tmp[2][m], _tmp2m); vst1q_s32(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 0; m < 4; m++) { int32x4_t _tmp00 = vld1q_s32(tmp[m][0]); int32x4_t _tmp01 = vld1q_s32(tmp[m][1]); int32x4_t _tmp02 = vld1q_s32(tmp[m][2]); int32x4_t _tmp03 = vld1q_s32(tmp[m][3]); int32x4_t _tmp04 = vld1q_s32(tmp[m][4]); int32x4_t _tmp05 = vld1q_s32(tmp[m][5]); int32x4_t _tmp02a = vaddq_s32(_tmp01, _tmp02); int32x4_t _tmp13a = vsubq_s32(_tmp01, _tmp02); int32x4_t _tmp02b = vaddq_s32(_tmp03, _tmp04); int32x4_t _tmp13b = vsubq_s32(_tmp03, _tmp04); int32x4_t _v2 = vdupq_n_s32(2); int32x4_t _v4 = vdupq_n_s32(4); int32x4_t _v8 = vdupq_n_s32(8); int32x4_t _out00 = vaddq_s32(vaddq_s32(_tmp00, _tmp02a), _tmp02b); int32x4_t _out01 = vmlaq_s32(_tmp13a, _tmp13b, _v2); int32x4_t _out02 = vmlaq_s32(_tmp02a, _tmp02b, _v4); int32x4_t _out03 = vmlaq_s32(vaddq_s32(_tmp05, _tmp13a), _tmp13b, _v8); // TODO use integer trick for division by 576 float32x4_t _v576 = vdupq_n_f32(1.0 / 576); _out00 = 
vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out00), _v576)); _out01 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out01), _v576)); _out02 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out02), _v576)); _out03 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out03), _v576)); vst1q_s32(output0, _out00); vst1q_s32(output0 + 4, _out01); vst1q_s32(output0 + 8, _out02); vst1q_s32(output0 + 12, _out03); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
/* ==== file: wino_conv_kernel_1_arm.c ==== */
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: zhli@openailab.com */ #ifdef __aarch64__ #include <stdint.h> #include <stdlib.h> #include <math.h> #include <arm_neon.h> #include <omp.h> #include "wino_conv_kernel_1_arm.h" #define TILE 4 #define BLOCK_HW_UNIT 4 #define ELEM_SIZE ((TILE + 2) * (TILE + 2)) #define WINO_MAX(a, b) ((a) > (b) ? (a) : (b)) #define WINO_MIN(a, b) ((a) < (b) ? 
(a) : (b)) // #ifdef __aarch64__ #define PER_OUT_CHAN 16 #define KER_COUT_UNIT 16 #define KER_COUT_UNIT4 4 void tran_inp_4(float*, float*, float*, int, int, int); void wino_sgemm_4x16_A72(float* output, const float* input, const float* kernel, long cin, short stride_save); void wino_sgemm_4x4_A72(float* output, const float* input, const float* kernel, long cin, short stride_save); void wino_sgemm_1x16(float* output, const float* input, const float* kernel, long cin); void wino_sgemm_1x4(float* output, const float* input, const float* kernel, long cin); void tran_out_4(float*, float*, int, float*, float*, int); // #else // #define PER_OUT_CHAN 12 // void wino_sgemm_4x12_A17(float* output, const float* input, const float* kernel, long cin); // void wino_sgemm_4x4_A17(float* output, const float* input, const float* kernel, long cin); // void wino_sgemm_1x12_A17(float* output, const float* input, const float* kernel, long cin); // // need to be optimized by neon // static inline void wino_sgemm_1x4_cpu(float* output, const float* input, const float* kernel, long cin) // { // for (int i = 0; i < 4; i++) // { // float sum = 0; // for (int k = 0; k < cin; k++) // { // sum += input[k] * kernel[k * 4 + i]; // } // output[i] = sum; // } // } // #endif #define INTERLEAVE_KERNEL_UNIT(cout_idx_p,cout_unit,cin,ker_src,ker_dst,ELEM_SIZE,i,j,s){ \ for(i = 0; i < cin; i++){ \ for(j = 0; j < cout_unit; j++){ \ *ker_dst = ker_src[((cout_idx_p + j) * cin + i) * ELEM_SIZE + s]; \ ker_dst++; \ } \ }} static inline void trans_kernel_f43(float* ker, float* trans_ker) { /* float G[18]={ 1./4 , 0. , 0. , -1./6 , -1./6 , -1./6 , -1./6 , 1./6 , -1./6 , 1./24 , 1./12 , 1./6 , 1./24 , -1./12 , 1./6 , 0. , 0. , 1. }; float GT[18]={ 1./4 , -1./6, -1./6 , 1./24, 1./24 , 0., 0., -1./6, 1./6 , 1./12, -1./12 , 0., 0., -1./6, -1./6 , 1./6, 1./6 , 1. 
}; */ float tmp[18] = {0}; float neg_r0_add_r2_x_1_6[6]; // (r0+r2)*1./6 float r0_1_4_add_r2_x_1_6[6]; // (r0*1/4 + r2)*1./6 float r1_1_6[6]; // r1*1/6 float r1_1_12[6]; // r1*1/12 float s_1_6 = 1. / 6.f; for (int j = 0; j < 3; j++) { neg_r0_add_r2_x_1_6[j] = -(ker[j] + ker[6 + j]) * s_1_6; r0_1_4_add_r2_x_1_6[j] = (ker[j] * 0.25 + ker[6 + j]) * s_1_6; r1_1_6[j] = ker[3 + j] * s_1_6; r1_1_12[j] = r1_1_6[j] * 0.5; } for (int j = 0; j < 3; j++) { tmp[j] = ker[j] * 0.25; tmp[3 + j] = -r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; tmp[6 + j] = r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; tmp[9 + j] = r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; tmp[12 + j] = -r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; tmp[15 + j] = ker[6 + j]; } // gemm(6,3,3,G,ker,tmp); done int idx; for (int j = 0; j < 6; j++) { idx = j * 3; neg_r0_add_r2_x_1_6[j] = -(tmp[idx] + tmp[idx + 2]) * s_1_6; r0_1_4_add_r2_x_1_6[j] = (tmp[idx] * 0.25 + tmp[idx + 2]) * s_1_6; r1_1_6[j] = tmp[idx + 1] * s_1_6; r1_1_12[j] = r1_1_6[j] * 0.5; } for (int j = 0; j < 6; j++) { idx = j * 6; trans_ker[idx] = tmp[j * 3] * 0.25; trans_ker[idx + 1] = -r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; trans_ker[idx + 2] = r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; trans_ker[idx + 3] = r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; trans_ker[idx + 4] = -r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; trans_ker[idx + 5] = tmp[j * 3 + 2]; } // gemm(6,6,3,tmp,GT,trans_ker); done } static inline void transform_kernel_f43_tile(struct ir_tensor* filter, float* trans_ker) { int outc = filter->dims[0]; int inc = filter->dims[1]; float* kernel = ( float* )filter->data; float* ker_ptr = trans_ker; for (int i = 0; i < outc; i++) { for (int j = 0; j < inc; j++) { trans_kernel_f43(( float* )(kernel + 9 * (j + i * inc)), ker_ptr); ker_ptr += ELEM_SIZE; } } } // ker0 [cout][cin][ELEM_SIZE] // ker1 [ELEM_SIZE][cout//KER_COUT_UNIT][cin][KER_COUT_UNIT] static inline void interleave_kernel_1(float* ker0, float* ker1, int cout, int cin) { int i,j; float* ker1_ptr = ker1; for(int s = 0; s < ELEM_SIZE; s++) { int p; 
//cout 16 for(p = 0; p < (cout& -KER_COUT_UNIT); p+=KER_COUT_UNIT){ INTERLEAVE_KERNEL_UNIT(p,KER_COUT_UNIT,cin,ker0,ker1_ptr,ELEM_SIZE,i,j,s); } //cout 4 for(p = (cout & -KER_COUT_UNIT); p < (cout & -KER_COUT_UNIT4); p += KER_COUT_UNIT4){ INTERLEAVE_KERNEL_UNIT(p,KER_COUT_UNIT4,cin,ker0,ker1_ptr,ELEM_SIZE,i,j,s); } // cout 1 for(p=(cout & -KER_COUT_UNIT4); p < cout; p ++){ INTERLEAVE_KERNEL_UNIT(p,1,cin,ker0,ker1_ptr,ELEM_SIZE,i,j,s); } } } static inline void pad_input1(const float* input, float* inp_padded, int inc, int inh, int inw, int padded_h, int padded_w, int pad0, int pad1) { int padded_hw = padded_h * padded_w; float* pad_ptr; float* inp_ptr = ( float* )input; int resi_h = padded_h - pad0 - inh; int resi_w = padded_w - pad1 - inw; for (int c = 0; c < inc; c++) { pad_ptr = inp_padded + c * padded_hw; // pad h_top memset(pad_ptr, 0, padded_w * pad0 * sizeof(float)); pad_ptr += pad0 * padded_w; // pad h_mid for (int h = 0; h < inh; h++) { // pad w_left memset(pad_ptr, 0, pad1 * sizeof(float)); // pad w_mid memcpy(pad_ptr + pad1, inp_ptr, inw * sizeof(float)); // pad w_end memset(pad_ptr + pad1 + inw, 0, resi_w * sizeof(float)); inp_ptr += inw; pad_ptr += padded_w; } // pad h_bottom memset(pad_ptr, 0, padded_w * resi_h * sizeof(float)); } } static inline void trans_inp_1tile(float* input, float* inp_ptr, int ih, int jw, int c, int in_hw, int inw) { float* inp = ( float* )input + c * in_hw + ih * 4 * inw + jw * 4; float* inp0 = inp; float* inp1 = inp0 + inw; float* inp2 = inp1 + inw; float* inp3 = inp2 + inw; float* inp4 = inp3 + inw; float* inp5 = inp4 + inw; float tmp[36] = {0}; float r1_add_r2[6]; float r3_add_r4[6]; float r1_minus_r2[6]; float r3_minus_r4[6]; float r4_minus_r2[6]; float r1_minus_r3[6]; for (int j = 0; j < 6; j++) { r1_add_r2[j] = inp1[j] + inp2[j]; r1_minus_r2[j] = inp1[j] - inp2[j]; r3_add_r4[j] = inp3[j] + inp4[j]; r3_minus_r4[j] = inp3[j] - inp4[j]; r4_minus_r2[j] = inp4[j] - inp2[j]; r1_minus_r3[j] = inp1[j] - inp3[j]; } for (int j = 0; 
j < 6; j++)
    {
        /* tmp = B^T * d (rows of the intermediate product). */
        tmp[j] = 4 * inp0[j] - 5 * inp2[j] + inp4[j];
        tmp[6 + j] = r3_add_r4[j] - 4 * r1_add_r2[j];
        tmp[12 + j] = 4 * r1_minus_r2[j] - r3_minus_r4[j];
        tmp[18 + j] = r4_minus_r2[j] - 2 * r1_minus_r3[j];
        tmp[24 + j] = r4_minus_r2[j] + 2 * r1_minus_r3[j];
        tmp[30 + j] = 4 * inp1[j] - 5 * inp3[j] + inp5[j];
    }
    /* Column combinations for the second (column-direction) transform. */
    float r1_4_minus_r3[6];
    float r4_minus_4_r2[6];
    float r4_minus_r2_[6];
    float r1_minus_r3_x2[6];
    for (int j = 0; j < 6; j++)
    {
        r4_minus_r2_[j] = tmp[j * 6 + 4] - tmp[j * 6 + 2];
        r1_4_minus_r3[j] = 4 * tmp[j * 6 + 1] - tmp[j * 6 + 3];
        r4_minus_4_r2[j] = tmp[j * 6 + 4] - 4 * tmp[j * 6 + 2];
        r1_minus_r3_x2[j] = 2 * (tmp[j * 6 + 1] - tmp[j * 6 + 3]);
    }
    for (int j = 0; j < 6; j++)
    {
        /* inp_ptr = (B^T d) * B, one 6-wide row per j. */
        inp_ptr[j * 6] = 4 * tmp[j * 6] - 5 * tmp[j * 6 + 2] + tmp[j * 6 + 4];
        inp_ptr[1 + j * 6] = r4_minus_4_r2[j] - r1_4_minus_r3[j];
        inp_ptr[2 + j * 6] = r4_minus_4_r2[j] + r1_4_minus_r3[j];
        inp_ptr[3 + j * 6] = r4_minus_r2_[j] - r1_minus_r3_x2[j];
        inp_ptr[4 + j * 6] = r4_minus_r2_[j] + r1_minus_r3_x2[j];
        inp_ptr[5 + j * 6] = 4 * tmp[j * 6 + 1] - 5 * tmp[j * 6 + 3] + tmp[j * 6 + 5];
    }
}

/* Input transform of FOUR horizontally adjacent 6x6 tiles (stride 4, so
 * they share a 6x21 strip of rows inp..inp+5*inw).  Tile k reads columns
 * [k*4, k*4+5].  Results are written scattered: element e of tile k goes to
 * inp_ptr[s_size * e + k] (s_size = floats between consecutive Winograd
 * elements in the interleaved buffer).  First half is scalar (row transform
 * into mid[36][4]); second half is NEON (column transform + stores). */
static inline void trans_inp_4_cpu(float* inp, float* inp_ptr, int inw, int s_size)
{
    float* inp0 = inp;
    float* inp1 = inp0 + inw;
    float* inp2 = inp1 + inw;
    float* inp3 = inp2 + inw;
    float* inp4 = inp3 + inw;
    float* inp5 = inp4 + inw;
    float mid[36 * 4] = {0};

    /* Per-tile column combinations, 6 columns x 4 tiles each. */
    float r4_minus_r2[24];
    float r1_4_minus_r3[24];
    float r4_minus_4_r2[24];
    float r1_minus_r3_x2[24];
    for (int i = 0; i < 6; i++)
    {
        // 0
        mid[i * 4] = 4 * inp0[i] - 5 * inp2[i] + inp4[i];
        mid[(30 + i) * 4] = 4 * inp1[i] - 5 * inp3[i] + inp5[i];
        r1_minus_r3_x2[i * 4 + 0] = (inp1[i] - inp3[i]) * 2;
        r1_4_minus_r3[i * 4 + 0] = 4 * inp1[i] - inp3[i];
        r4_minus_4_r2[i * 4 + 0] = inp4[i] - 4 * inp2[i];
        r4_minus_r2[i * 4 + 0] = inp4[i] - inp2[i];
        // 1
        mid[i * 4 + 1] = 4 * inp0[i + 4] - 5 * inp2[i + 4] + inp4[i + 4];
        mid[(30 + i) * 4 + 1] = 4 * inp1[i + 4] - 5 * inp3[i + 4] + inp5[i + 4];
        r1_minus_r3_x2[i * 4 + 1] = (inp1[i + 4] - inp3[i + 4]) * 2;
        r1_4_minus_r3[i * 4 + 1] = 4 * inp1[i + 4] - inp3[i + 4];
        r4_minus_4_r2[i * 4 + 1] = inp4[i + 4] - 4 * inp2[i + 4];
        r4_minus_r2[i * 4 + 1] = inp4[i + 4] - inp2[i + 4];
        // 2
        mid[i * 4 + 2] = 4 * inp0[i + 8] - 5 * inp2[i + 8] + inp4[i + 8];
        mid[(30 + i) * 4 + 2] = 4 * inp1[i + 8] - 5 * inp3[i + 8] + inp5[i + 8];
        r1_minus_r3_x2[i * 4 + 2] = (inp1[i + 8] - inp3[i + 8]) * 2;
        r1_4_minus_r3[i * 4 + 2] = 4 * inp1[i + 8] - inp3[i + 8];
        r4_minus_4_r2[i * 4 + 2] = inp4[i + 8] - 4 * inp2[i + 8];
        r4_minus_r2[i * 4 + 2] = inp4[i + 8] - inp2[i + 8];
        // 3
        mid[i * 4 + 3] = 4 * inp0[i + 12] - 5 * inp2[i + 12] + inp4[i + 12];
        mid[(30 + i) * 4 + 3] = 4 * inp1[i + 12] - 5 * inp3[i + 12] + inp5[i + 12];
        r1_minus_r3_x2[i * 4 + 3] = (inp1[i + 12] - inp3[i + 12]) * 2;
        r1_4_minus_r3[i * 4 + 3] = 4 * inp1[i + 12] - inp3[i + 12];
        r4_minus_4_r2[i * 4 + 3] = inp4[i + 12] - 4 * inp2[i + 12];
        r4_minus_r2[i * 4 + 3] = inp4[i + 12] - inp2[i + 12];
    }
    //====================================================================
    /* Scalar reference for the NEON code below (rows 1,2,3,4 of mid):
    // for(int i = 0; i < 6; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         mid[(6 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k];
    //         mid[(12 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k];
    //         mid[(18 + i) * 4 + k] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k];
    //         mid[(24 + i) * 4 + k] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k];
    //     }
    // }
    */
    /* Each float32x4_t below holds one value for all four tiles. */
    float32x4_t r0 = vld1q_f32(r4_minus_4_r2);
    float32x4_t r1 = vld1q_f32(r4_minus_4_r2 + 4);
    float32x4_t r2 = vld1q_f32(r4_minus_4_r2 + 8);
    float32x4_t r3 = vld1q_f32(r4_minus_4_r2 + 12);
    float32x4_t r4 = vld1q_f32(r4_minus_4_r2 + 16);
    float32x4_t r5 = vld1q_f32(r4_minus_4_r2 + 20);
    float32x4_t r0_ = vld1q_f32(r1_4_minus_r3);
    float32x4_t r1_ = vld1q_f32(r1_4_minus_r3 + 4);
    float32x4_t r2_ = vld1q_f32(r1_4_minus_r3 + 8);
    float32x4_t r3_ = vld1q_f32(r1_4_minus_r3 + 12);
    float32x4_t r4_ = vld1q_f32(r1_4_minus_r3 + 16);
    float32x4_t r5_ = vld1q_f32(r1_4_minus_r3 + 20);
    float32x4_t line0_0 = vld1q_f32(mid);
    float32x4_t
line0_1 = vld1q_f32(mid + 4);
    float32x4_t line0_2 = vld1q_f32(mid + 8);
    float32x4_t line0_3 = vld1q_f32(mid + 12);
    float32x4_t line0_4 = vld1q_f32(mid + 16);
    float32x4_t line0_5 = vld1q_f32(mid + 20);

    /* lineR_C = mid[R][C] for the intermediate 6x6 product (4 tiles wide);
     * rows 1..4 are computed in registers, rows 0 and 5 are loaded. */
    float32x4_t line1_0 = vsubq_f32(r0, r0_);    // mid[(6 + i) * 4 + k]  [1][0]
    float32x4_t line1_1 = vsubq_f32(r1, r1_);    // mid[(6 + i) * 4 + k]  [1][1]
    float32x4_t line1_2 = vsubq_f32(r2, r2_);    // mid[(6 + i) * 4 + k]  [1][2]
    float32x4_t line1_3 = vsubq_f32(r3, r3_);    // mid[(6 + i) * 4 + k]  [1][3]
    float32x4_t line1_4 = vsubq_f32(r4, r4_);    // mid[(6 + i) * 4 + k]  [1][4]
    float32x4_t line1_5 = vsubq_f32(r5, r5_);    // mid[(6 + i) * 4 + k]  [1][5]
    float32x4_t line2_0 = vaddq_f32(r0, r0_);    // mid[(12 + i) * 4 + k] [2][0]
    float32x4_t line2_1 = vaddq_f32(r1, r1_);    // mid[(12 + i) * 4 + k] [2][1]
    float32x4_t line2_2 = vaddq_f32(r2, r2_);    // mid[(12 + i) * 4 + k] [2][2]
    float32x4_t line2_3 = vaddq_f32(r3, r3_);    // mid[(12 + i) * 4 + k] [2][3]
    float32x4_t line2_4 = vaddq_f32(r4, r4_);    // mid[(12 + i) * 4 + k] [2][4]
    float32x4_t line2_5 = vaddq_f32(r5, r5_);    // mid[(12 + i) * 4 + k] [2][5]

    r0 = vld1q_f32(r4_minus_r2);
    r1 = vld1q_f32(r4_minus_r2 + 4);
    r2 = vld1q_f32(r4_minus_r2 + 8);
    r3 = vld1q_f32(r4_minus_r2 + 12);
    r4 = vld1q_f32(r4_minus_r2 + 16);
    r5 = vld1q_f32(r4_minus_r2 + 20);
    r0_ = vld1q_f32(r1_minus_r3_x2);
    r1_ = vld1q_f32(r1_minus_r3_x2 + 4);
    r2_ = vld1q_f32(r1_minus_r3_x2 + 8);
    r3_ = vld1q_f32(r1_minus_r3_x2 + 12);
    r4_ = vld1q_f32(r1_minus_r3_x2 + 16);
    r5_ = vld1q_f32(r1_minus_r3_x2 + 20);
    float32x4_t line5_0 = vld1q_f32(mid + 120);
    float32x4_t line5_1 = vld1q_f32(mid + 124);
    float32x4_t line5_2 = vld1q_f32(mid + 128);
    float32x4_t line5_3 = vld1q_f32(mid + 132);
    float32x4_t line5_4 = vld1q_f32(mid + 136);
    float32x4_t line5_5 = vld1q_f32(mid + 140);
    float32x4_t line3_0 = vsubq_f32(r0, r0_);    // mid[(18 + i) * 4 + k] [3][0]
    float32x4_t line3_1 = vsubq_f32(r1, r1_);    // mid[(18 + i) * 4 + k] [3][1]
    float32x4_t line3_2 = vsubq_f32(r2, r2_);    // mid[(18 + i) * 4 + k] [3][2]
    float32x4_t line3_3 = vsubq_f32(r3, r3_);    // mid[(18 + i) * 4 + k] [3][3]
    float32x4_t line3_4 = vsubq_f32(r4, r4_);    // mid[(18 + i) * 4 + k] [3][4]
    float32x4_t line3_5 = vsubq_f32(r5, r5_);    // mid[(18 + i) * 4 + k] [3][5]
    float32x4_t line4_0 = vaddq_f32(r0, r0_);    // mid[(24 + i) * 4 + k] [4][0]
    float32x4_t line4_1 = vaddq_f32(r1, r1_);    // mid[(24 + i) * 4 + k] [4][1]
    float32x4_t line4_2 = vaddq_f32(r2, r2_);    // mid[(24 + i) * 4 + k] [4][2]
    float32x4_t line4_3 = vaddq_f32(r3, r3_);    // mid[(24 + i) * 4 + k] [4][3]
    float32x4_t line4_4 = vaddq_f32(r4, r4_);    // mid[(24 + i) * 4 + k] [4][4]
    float32x4_t line4_5 = vaddq_f32(r5, r5_);    // mid[(24 + i) * 4 + k] [4][5]

    /* Column transform: output columns 3 and 4 per row i. */
    // r4_minus_r2[i * 4 + k] i=0 = mid[0][4]
    r0 = vsubq_f32(line0_4, line0_2);
    r1 = vsubq_f32(line1_4, line1_2);
    r2 = vsubq_f32(line2_4, line2_2);
    r3 = vsubq_f32(line3_4, line3_2);
    r4 = vsubq_f32(line4_4, line4_2);
    r5 = vsubq_f32(line5_4, line5_2);
    r0_ = vsubq_f32(line0_1, line0_3);
    r1_ = vsubq_f32(line1_1, line1_3);
    r2_ = vsubq_f32(line2_1, line2_3);
    r3_ = vsubq_f32(line3_1, line3_3);
    r4_ = vsubq_f32(line4_1, line4_3);
    r5_ = vsubq_f32(line5_1, line5_3);
    float32x4_t const2 = vdupq_n_f32(2.f);
    r0_ = vmulq_f32(r0_, const2);
    r1_ = vmulq_f32(r1_, const2);
    r2_ = vmulq_f32(r2_, const2);
    r3_ = vmulq_f32(r3_, const2);
    r4_ = vmulq_f32(r4_, const2);
    r5_ = vmulq_f32(r5_, const2);
    vst1q_f32(inp_ptr + s_size * 3, vsubq_f32(r0, r0_));     // inp_ptr[s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 9, vsubq_f32(r1, r1_));
    vst1q_f32(inp_ptr + s_size * 15, vsubq_f32(r2, r2_));
    vst1q_f32(inp_ptr + s_size * 21, vsubq_f32(r3, r3_));
    vst1q_f32(inp_ptr + s_size * 27, vsubq_f32(r4, r4_));
    vst1q_f32(inp_ptr + s_size * 33, vsubq_f32(r5, r5_));
    vst1q_f32(inp_ptr + s_size * 4, vaddq_f32(r0, r0_));     // inp_ptr[s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 10, vaddq_f32(r1, r1_));
    vst1q_f32(inp_ptr + s_size * 16, vaddq_f32(r2, r2_));
    vst1q_f32(inp_ptr + s_size * 22, vaddq_f32(r3, r3_));
    vst1q_f32(inp_ptr + s_size * 28, vaddq_f32(r4, r4_));
    vst1q_f32(inp_ptr + s_size * 34, vaddq_f32(r5, r5_));

    /* Output columns 1 and 2 per row i. */
    float32x4_t const4 = vdupq_n_f32(4.f);
    float32x4_t const5 = vdupq_n_f32(-5.f);
    r0_ = vmulq_f32(line0_1, const4);    // line 1*4 ========
    r1_ = vmulq_f32(line1_1, const4);
    r2_ = vmulq_f32(line2_1, const4);
    r3_ = vmulq_f32(line3_1, const4);
    r4_ = vmulq_f32(line4_1, const4);
    r5_ = vmulq_f32(line5_1, const4);
    float32x4_t rr0_ = vsubq_f32(r0_, line0_3);    // line1*4-line3
    float32x4_t rr1_ = vsubq_f32(r1_, line1_3);
    float32x4_t rr2_ = vsubq_f32(r2_, line2_3);
    float32x4_t rr3_ = vsubq_f32(r3_, line3_3);
    float32x4_t rr4_ = vsubq_f32(r4_, line4_3);
    float32x4_t rr5_ = vsubq_f32(r5_, line5_3);
    r0 = vmulq_f32(line0_2, const4);
    r1 = vmulq_f32(line1_2, const4);
    r2 = vmulq_f32(line2_2, const4);
    r3 = vmulq_f32(line3_2, const4);
    r4 = vmulq_f32(line4_2, const4);
    r5 = vmulq_f32(line5_2, const4);
    r0 = vsubq_f32(line0_4, r0);    // line4 -4*line2
    r1 = vsubq_f32(line1_4, r1);
    r2 = vsubq_f32(line2_4, r2);
    r3 = vsubq_f32(line3_4, r3);
    r4 = vsubq_f32(line4_4, r4);
    r5 = vsubq_f32(line5_4, r5);
    vst1q_f32(inp_ptr + s_size * 1, vsubq_f32(r0, rr0_));    // inp_ptr[s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 7, vsubq_f32(r1, rr1_));
    vst1q_f32(inp_ptr + s_size * 13, vsubq_f32(r2, rr2_));
    vst1q_f32(inp_ptr + s_size * 19, vsubq_f32(r3, rr3_));
    vst1q_f32(inp_ptr + s_size * 25, vsubq_f32(r4, rr4_));
    vst1q_f32(inp_ptr + s_size * 31, vsubq_f32(r5, rr5_));
    vst1q_f32(inp_ptr + s_size * 2, vaddq_f32(r0, rr0_));    // inp_ptr[s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 8, vaddq_f32(r1, rr1_));
    vst1q_f32(inp_ptr + s_size * 14, vaddq_f32(r2, rr2_));
    vst1q_f32(inp_ptr + s_size * 20, vaddq_f32(r3, rr3_));
    vst1q_f32(inp_ptr + s_size * 26, vaddq_f32(r4, rr4_));
    vst1q_f32(inp_ptr + s_size * 32, vaddq_f32(r5, rr5_));

    /* Output column 5 per row i: 4*c1 - 5*c3 + c5. */
    r0_ = vaddq_f32(line0_5, r0_);    // 5 + 1*4
    r1_ = vaddq_f32(line1_5, r1_);
    r2_ = vaddq_f32(line2_5, r2_);
    r3_ = vaddq_f32(line3_5, r3_);
    r4_ = vaddq_f32(line4_5, r4_);
    r5_ = vaddq_f32(line5_5, r5_);
    r0 = vmulq_f32(line0_3, const5);
    r1 = vmulq_f32(line1_3, const5);
    r2 = vmulq_f32(line2_3, const5);
    r3 = vmulq_f32(line3_3, const5);
    r4 = vmulq_f32(line4_3, const5);
    r5 = vmulq_f32(line5_3, const5);
    vst1q_f32(inp_ptr + s_size * 5, vaddq_f32(r0, r0_));     // inp_ptr[s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 11, vaddq_f32(r1, r1_));
    vst1q_f32(inp_ptr + s_size * 17, vaddq_f32(r2, r2_));
    vst1q_f32(inp_ptr + s_size * 23, vaddq_f32(r3, r3_));
    vst1q_f32(inp_ptr + s_size * 29, vaddq_f32(r4, r4_));
    vst1q_f32(inp_ptr + s_size * 35, vaddq_f32(r5, r5_));

    /* Output column 0 per row i: 4*c0 - 5*c2 + c4. */
    r0 = vmulq_f32(line0_0, const4);
    r1 = vmulq_f32(line1_0, const4);
    r2 = vmulq_f32(line2_0, const4);
    r3 = vmulq_f32(line3_0, const4);
    r4 = vmulq_f32(line4_0, const4);
    r5 = vmulq_f32(line5_0, const4);
    r0_ = vmulq_f32(line0_2, const5);
    r1_ = vmulq_f32(line1_2, const5);
    r2_ = vmulq_f32(line2_2, const5);
    r3_ = vmulq_f32(line3_2, const5);
    r4_ = vmulq_f32(line4_2, const5);
    r5_ = vmulq_f32(line5_2, const5);
    r0 = vaddq_f32(r0, line0_4);
    r1 = vaddq_f32(r1, line1_4);
    r2 = vaddq_f32(r2, line2_4);
    r3 = vaddq_f32(r3, line3_4);
    r4 = vaddq_f32(r4, line4_4);
    r5 = vaddq_f32(r5, line5_4);
    vst1q_f32(inp_ptr + s_size * 0, vaddq_f32(r0, r0_));     // inp_ptr[s_size * (0 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 6, vaddq_f32(r1, r1_));
    vst1q_f32(inp_ptr + s_size * 12, vaddq_f32(r2, r2_));
    vst1q_f32(inp_ptr + s_size * 18, vaddq_f32(r3, r3_));
    vst1q_f32(inp_ptr + s_size * 24, vaddq_f32(r4, r4_));
    vst1q_f32(inp_ptr + s_size * 30, vaddq_f32(r5, r5_));
    /* Scalar reference for the column transform above:
    // for(int i = 0; i < 6; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         r4_minus_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - mid[(i * 6 + 2) * 4 + k];
    //         r1_4_minus_r3[i * 4 + k] = 4 * mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k];
    //         r4_minus_4_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - 4 * mid[(i * 6 + 2) * 4 + k];
    //         r1_minus_r3_x2[i * 4 + k] = 2 * (mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k]);
    //     }
    // }
    // for(int i = 1; i < 2; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         inp_ptr[k + s_size * (i * 6)] =
    //             4 * mid[(i * 6) * 4 + k] - 5 * mid[(i * 6 + 2) * 4 + k] + mid[(i * 6 + 4) * 4 + k];
    //         // inp_ptr[k + s_size * (1 + i * 6)] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k];
    //         // inp_ptr[k + s_size * (2 + i * 6)] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k];
    //         // inp_ptr[k + s_size * (3 + i * 6)] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k];
    //         // inp_ptr[k + s_size * (4 + i * 6)] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k];
    //         // inp_ptr[k + s_size * (5 + i * 6)] =
    //         //     4 * mid[(i * 6 + 1) * 4 + k] - 5 * mid[(i * 6 + 3) * 4 + k] + mid[(i * 6 + 5) * 4 + k];
    //     }
    // }
    */
}

// trans_input [block_hw/4][ELEM_SIZE][inc][4]
/* Transform the whole padded input into the interleaved layout above,
 * four tiles at a time.  The fast path requires the four tiles to sit on
 * the same tile row (idxh[0] == idxh[3]); otherwise each tile is
 * transformed separately and interleaved through a stack buffer (VLA). */
static inline void tran_input_4block(const float* input, float* trans_inp, int inc, int block_h, int block_w, int inh,
                                     int inw)
{
    int in_hw = inh * inw;
    int block_hw = block_h * block_w;
    int nn_block = block_hw >> 2;    // number of full 4-tile groups
    int idxh[4];
    int idxw[4];

    for (int ib = 0; ib < nn_block; ib++)
    {
        float* inp_ptr_4tile = trans_inp + ib * 4 * ELEM_SIZE *
inc; idxh[0] = (ib * 4) / block_w; idxh[1] = (ib * 4 + 1) / block_w; idxh[2] = (ib * 4 + 2) / block_w; idxh[3] = (ib * 4 + 3) / block_w; idxw[0] = (ib * 4) % block_w; idxw[1] = (ib * 4 + 1) % block_w; idxw[2] = (ib * 4 + 2) % block_w; idxw[3] = (ib * 4 + 3) % block_w; if (idxh[0] == idxh[3]) { float* temp_inp_ptr = ( float* )(input + idxh[0] * 4 * inw + idxw[0] * 4); for (int c = 0; c < inc; c++) { #ifdef __aarch64__ float ker00[4] = {1, 2, 4, 5}; tran_inp_4(temp_inp_ptr, inp_ptr_4tile + 4 * c, ker00, inw, inc * 16, in_hw); temp_inp_ptr += in_hw; #else trans_inp_4_cpu(temp_inp_ptr, inp_ptr_4tile + c * 4, inw, inc * 4); temp_inp_ptr += in_hw; #endif } } else { float buffer0[inc * ELEM_SIZE * 4]; float* buffer = buffer0; for (int c = 0; c < inc; c++) { trans_inp_1tile(( float* )input, buffer, idxh[0], idxw[0], c, in_hw, inw); buffer += ELEM_SIZE; trans_inp_1tile(( float* )input, buffer, idxh[1], idxw[1], c, in_hw, inw); buffer += ELEM_SIZE; trans_inp_1tile(( float* )input, buffer, idxh[2], idxw[2], c, in_hw, inw); buffer += ELEM_SIZE; trans_inp_1tile(( float* )input, buffer, idxh[3], idxw[3], c, in_hw, inw); buffer += ELEM_SIZE; } // interleave float* tmp_inp = inp_ptr_4tile; for (int s = 0; s < ELEM_SIZE; s++) { for (int i = 0; i < inc; i++) { for (int j = 0; j < 4; j++) { *tmp_inp = buffer0[i * ELEM_SIZE * 4 + j * ELEM_SIZE + s]; tmp_inp++; } } } // end interleave } } } // tran_inp [block_hw/4][36][inc][4] -> [36][block_hw/4][inc][4] static inline void tran_input_4block_1(const float* input, float* trans_inp, int inc, int block_h, int block_w, int inh, int inw) { int in_hw = inh * inw; int block_hw = block_h * block_w; int nn_block = block_hw >> 2; int idxh[4]; int idxw[4]; int s_size = block_hw * inc * sizeof(float); for(int ib = 0; ib < nn_block; ib++) { int off_set0 = ib * BLOCK_HW_UNIT * inc; idxh[0] = (ib * 4) / block_w; idxh[1] = (ib * 4 + 1) / block_w; idxh[2] = (ib * 4 + 2) / block_w; idxh[3] = (ib * 4 + 3) / block_w; idxw[0] = (ib * 4) % block_w; idxw[1] = 
(ib * 4 + 1) % block_w; idxw[2] = (ib * 4 + 2) % block_w; idxw[3] = (ib * 4 + 3) % block_w; if(idxh[0] == idxh[3]) { float* temp_inp_ptr = ( float* )(input + idxh[0] * 4 * inw + idxw[0] * 4); for(int c = 0; c < inc; c++) { float ker00[4] = {1, 2, 4, 5}; tran_inp_4(temp_inp_ptr, trans_inp + c * 4 + off_set0, ker00, inw, s_size, in_hw); temp_inp_ptr += in_hw; } } else { float buffer0[inc * ELEM_SIZE * BLOCK_HW_UNIT]; float* buffer = buffer0; for(int c = 0; c < inc; c++) { trans_inp_1tile(( float* )input, buffer, idxh[0], idxw[0], c, in_hw, inw); buffer += ELEM_SIZE; trans_inp_1tile(( float* )input, buffer, idxh[1], idxw[1], c, in_hw, inw); buffer += ELEM_SIZE; trans_inp_1tile(( float* )input, buffer, idxh[2], idxw[2], c, in_hw, inw); buffer += ELEM_SIZE; trans_inp_1tile(( float* )input, buffer, idxh[3], idxw[3], c, in_hw, inw); buffer += ELEM_SIZE; } // interleave for(int s = 0; s < ELEM_SIZE; s++) { float* tmp_inp = trans_inp + s * block_hw * inc + off_set0; for(int i = 0; i < inc; i++) { for(int j = 0; j < BLOCK_HW_UNIT; j++) { *tmp_inp = buffer0[i * ELEM_SIZE * BLOCK_HW_UNIT + j * ELEM_SIZE + s]; tmp_inp++; } } } // end interleave } } } static inline void tran_input_resi_block(const float* input, float* trans_inp, int inc, int nn_block, int resi_block, int block_hw, int block_w, int in_hw, int inw) { float* inp_ptr = trans_inp + nn_block * 4 * ELEM_SIZE * inc; for (int ib = resi_block; ib < block_hw; ib++) { float buffer0[ELEM_SIZE * inc]; float* buffer = buffer0; for (int c = 0; c < inc; c++) { int ih = ib / block_w; int jw = ib % block_w; trans_inp_1tile(( float* )input, buffer, ih, jw, c, in_hw, inw); buffer += ELEM_SIZE; } // interleave for (int s = 0; s < ELEM_SIZE; s++) { for (int i = 0; i < inc; i++) { *inp_ptr = buffer0[i * ELEM_SIZE + s]; inp_ptr++; } } // end interleave } } // tran_inp [block_resi][36][inc] -> [36][block_resi][inc] static inline void tran_input_resi_block_1(const float* input, float* trans_inp, int inc, int nn_block, int resi_block, int 
block_hw, int block_w, int in_hw, int inw)
{
    for(int ib = resi_block; ib < block_hw; ib++)
    {
        int off_set0 = ib * inc;
        float buffer0[ELEM_SIZE * inc];
        float* buffer = buffer0;
        for(int c = 0; c < inc; c++)
        {
            int ih = ib / block_w;
            int jw = ib % block_w;
            trans_inp_1tile(( float* )input, buffer, ih, jw, c, in_hw, inw);
            buffer += ELEM_SIZE;
        }
        // interleave [inc][ELEM_SIZE] -> element-major [ELEM_SIZE][inc]
        for(int s = 0; s < ELEM_SIZE; s++)
        {
            float* tmp_inp = trans_inp + s * block_hw * inc + off_set0;
            for(int i = 0; i < inc; i++)
            {
                *tmp_inp = buffer0[i * ELEM_SIZE + s];
                tmp_inp++;
            }
        }
        // end interleave
    }
}

/* Apply the fused activation: activation >= 0 clamps below at 0 (ReLU);
 * activation == 6 additionally clamps above at 6 (ReLU6); negative
 * activation values pass through unchanged. */
static inline float do_activation(float value, int activation)
{
    if (activation >= 0)
        value = WINO_MAX(value, 0);
    if (activation == 6)
        value = WINO_MIN(value, 6);
    return value;
}

/* Output transform of one 6x6 Winograd tile (mid, row-major) into a 4x4
 * output patch at out (row stride outw): out = A^T * mid * A, plus optional
 * per-channel bias and activation.  bias_ptr may be NULL. */
static inline void trans_output_f43(const float* mid, float* out, int outw, const float* bias_ptr, int activation)
{
    /*
    float AT[24]={
        1., 1.,  1., 1.,  1., 0.,
        0., 1., -1., 2., -2., 0.,
        0., 1.,  1., 4.,  4., 0.,
        0., 1., -1., 8., -8., 1.
    };
    float A[24]={
        1.,  0., 0.,  0.,
        1.,  1., 1.,  1.,
        1., -1., 1., -1.,
        1.,  2., 4.,  8.,
        1., -2., 4., -8.,
        0.,  0., 0.,  1.
    };
    */
    float tmp[24] = {0};

    /* Row transform: tmp = A^T * mid (4x6). */
    float r1_add_r2[6];
    float r1_minus_r2[6];
    float r3_add_r4[6];
    float r3_minus_r4_x2[6];
    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = mid[6 * 1 + j] + mid[6 * 2 + j];
        r1_minus_r2[j] = mid[6 * 1 + j] - mid[6 * 2 + j];
        r3_add_r4[j] = mid[6 * 3 + j] + mid[6 * 4 + j];
        r3_minus_r4_x2[j] = (mid[6 * 3 + j] - mid[6 * 4 + j]) * 2;
    }
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = mid[j] + r1_add_r2[j] + r3_add_r4[j];
        tmp[6 + j] = r1_minus_r2[j] + r3_minus_r4_x2[j];
        tmp[12 + j] = r1_add_r2[j] + 4 * r3_add_r4[j];
        tmp[18 + j] = r1_minus_r2[j] + 4 * r3_minus_r4_x2[j] + mid[6 * 5 + j];
    }
    float* out0 = out;
    float* out1 = out0 + outw;
    float* out2 = out1 + outw;
    float* out3 = out2 + outw;

    /* Column transform: (A^T mid) * A, per output row j. */
    float _r1_add_r2[4];
    float _r1_minus_r2[4];
    float _r3_add_r4[4];
    float _r3_minus_r4_x2[4];
    int idx;
    for (int j = 0; j < 4; j++)
    {
        idx = 6 * j;
        _r1_add_r2[j] = tmp[idx + 1] + tmp[idx + 2];
        _r1_minus_r2[j] = tmp[idx + 1] - tmp[idx + 2];
        _r3_add_r4[j] = tmp[idx + 3] + tmp[idx + 4];
        _r3_minus_r4_x2[j] = (tmp[idx + 3] - tmp[idx + 4]) * 2;
    }
    if (bias_ptr)
    {
        float bias = bias_ptr[0];
        out0[0] = do_activation(tmp[0 * 6] + _r1_add_r2[0] + _r3_add_r4[0] + bias, activation);
        out1[0] = do_activation(tmp[1 * 6] + _r1_add_r2[1] + _r3_add_r4[1] + bias, activation);
        out2[0] = do_activation(tmp[2 * 6] + _r1_add_r2[2] + _r3_add_r4[2] + bias, activation);
        out3[0] = do_activation(tmp[3 * 6] + _r1_add_r2[3] + _r3_add_r4[3] + bias, activation);
        out0[1] = do_activation(_r1_minus_r2[0] + _r3_minus_r4_x2[0] + bias, activation);
        out1[1] = do_activation(_r1_minus_r2[1] + _r3_minus_r4_x2[1] + bias, activation);
        out2[1] = do_activation(_r1_minus_r2[2] + _r3_minus_r4_x2[2] + bias, activation);
        out3[1] = do_activation(_r1_minus_r2[3] + _r3_minus_r4_x2[3] + bias, activation);
        out0[2] = do_activation(_r1_add_r2[0] + 4 * _r3_add_r4[0] + bias, activation);
        out1[2] = do_activation(_r1_add_r2[1] + 4 * _r3_add_r4[1] + bias, activation);
        out2[2] = do_activation(_r1_add_r2[2] + 4 * _r3_add_r4[2] + bias, activation);
out3[2] = do_activation(_r1_add_r2[3] + 4 * _r3_add_r4[3] + bias, activation);
        out0[3] = do_activation(_r1_minus_r2[0] + 4 * _r3_minus_r4_x2[0] + tmp[0 * 6 + 5] + bias, activation);
        out1[3] = do_activation(_r1_minus_r2[1] + 4 * _r3_minus_r4_x2[1] + tmp[1 * 6 + 5] + bias, activation);
        out2[3] = do_activation(_r1_minus_r2[2] + 4 * _r3_minus_r4_x2[2] + tmp[2 * 6 + 5] + bias, activation);
        out3[3] = do_activation(_r1_minus_r2[3] + 4 * _r3_minus_r4_x2[3] + tmp[3 * 6 + 5] + bias, activation);
    }
    else
    {
        /* Same expressions without the bias term. */
        out0[0] = do_activation(tmp[0 * 6] + _r1_add_r2[0] + _r3_add_r4[0], activation);
        out1[0] = do_activation(tmp[1 * 6] + _r1_add_r2[1] + _r3_add_r4[1], activation);
        out2[0] = do_activation(tmp[2 * 6] + _r1_add_r2[2] + _r3_add_r4[2], activation);
        out3[0] = do_activation(tmp[3 * 6] + _r1_add_r2[3] + _r3_add_r4[3], activation);
        out0[1] = do_activation(_r1_minus_r2[0] + _r3_minus_r4_x2[0], activation);
        out1[1] = do_activation(_r1_minus_r2[1] + _r3_minus_r4_x2[1], activation);
        out2[1] = do_activation(_r1_minus_r2[2] + _r3_minus_r4_x2[2], activation);
        out3[1] = do_activation(_r1_minus_r2[3] + _r3_minus_r4_x2[3], activation);
        out0[2] = do_activation(_r1_add_r2[0] + 4 * _r3_add_r4[0], activation);
        out1[2] = do_activation(_r1_add_r2[1] + 4 * _r3_add_r4[1], activation);
        out2[2] = do_activation(_r1_add_r2[2] + 4 * _r3_add_r4[2], activation);
        out3[2] = do_activation(_r1_add_r2[3] + 4 * _r3_add_r4[3], activation);
        out0[3] = do_activation(_r1_minus_r2[0] + 4 * _r3_minus_r4_x2[0] + tmp[0 * 6 + 5], activation);
        out1[3] = do_activation(_r1_minus_r2[1] + 4 * _r3_minus_r4_x2[1] + tmp[1 * 6 + 5], activation);
        out2[3] = do_activation(_r1_minus_r2[2] + 4 * _r3_minus_r4_x2[2] + tmp[2 * 6 + 5], activation);
        out3[3] = do_activation(_r1_minus_r2[3] + 4 * _r3_minus_r4_x2[3] + tmp[3 * 6 + 5], activation);
    }
}

/* Same output transform as trans_output_f43 but writing a dense 4x4 buffer
 * (row stride 4) with NO activation applied -- used for edge tiles whose
 * valid region is smaller than 4x4, so the caller can copy out only the
 * valid part (applying the activation itself). */
static inline void trans_output_f43_ordinary(const float* mid, float* out, const float* bias_ptr)
{
    /*
    float AT[24]={
        1., 1.,  1., 1.,  1., 0.,
        0., 1., -1., 2., -2., 0.,
        0., 1.,  1., 4.,  4., 0.,
        0., 1., -1., 8., -8., 1.
    };
    float A[24]={
        1.,  0., 0.,  0.,
        1.,  1., 1.,  1.,
        1., -1., 1., -1.,
        1.,  2., 4.,  8.,
        1., -2., 4., -8.,
        0.,  0., 0.,  1.
    };
    */
    float tmp[24] = {0};

    float r1_add_r2[6];
    float r1_minus_r2[6];
    float r3_add_r4[6];
    float r3_minus_r4_x2[6];
    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = mid[6 * 1 + j] + mid[6 * 2 + j];
        r1_minus_r2[j] = mid[6 * 1 + j] - mid[6 * 2 + j];
        r3_add_r4[j] = mid[6 * 3 + j] + mid[6 * 4 + j];
        r3_minus_r4_x2[j] = (mid[6 * 3 + j] - mid[6 * 4 + j]) * 2;
    }
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = mid[j] + r1_add_r2[j] + r3_add_r4[j];
        tmp[6 + j] = r1_minus_r2[j] + r3_minus_r4_x2[j];
        tmp[12 + j] = r1_add_r2[j] + 4 * r3_add_r4[j];
        tmp[18 + j] = r1_minus_r2[j] + 4 * r3_minus_r4_x2[j] + mid[6 * 5 + j];
    }
    float _r1_add_r2[4];
    float _r1_minus_r2[4];
    float _r3_add_r4[4];
    float _r3_minus_r4_x2[4];
    int idx;
    for (int j = 0; j < 4; j++)
    {
        idx = 6 * j;
        _r1_add_r2[j] = tmp[idx + 1] + tmp[idx + 2];
        _r1_minus_r2[j] = tmp[idx + 1] - tmp[idx + 2];
        _r3_add_r4[j] = tmp[idx + 3] + tmp[idx + 4];
        _r3_minus_r4_x2[j] = (tmp[idx + 3] - tmp[idx + 4]) * 2;
    }
    if (bias_ptr)
    {
        float bias = bias_ptr[0];
        for (int j = 0; j < 4; j++)
        {
            idx = j * 4;
            out[idx] = bias + tmp[j * 6] + _r1_add_r2[j] + _r3_add_r4[j];
            out[idx + 1] = bias + _r1_minus_r2[j] + _r3_minus_r4_x2[j];
            out[idx + 2] = bias + _r1_add_r2[j] + 4 * _r3_add_r4[j];
            out[idx + 3] = bias + _r1_minus_r2[j] + 4 * _r3_minus_r4_x2[j] + tmp[j * 6 + 5];
        }
    }
    else
    {
        for (int j = 0; j < 4; j++)
        {
            idx = j * 4;
            out[idx] = tmp[j * 6] + _r1_add_r2[j] + _r3_add_r4[j];
            out[idx + 1] = _r1_minus_r2[j] + _r3_minus_r4_x2[j];
            out[idx + 2] = _r1_add_r2[j] + 4 * _r3_add_r4[j];
            out[idx + 3] = _r1_minus_r2[j] + 4 * _r3_minus_r4_x2[j] + tmp[j * 6 + 5];
        }
    }
}

/* Transform KER_COUT_UNIT_ consecutive output channels of ONE tile
 * (idx_blockhw) back to the spatial output.  Interior tiles go straight to
 * out; right/bottom edge tiles (when resi_h/resi_w != 0) go through
 * tmp_buffer and only the valid ret_h x ret_w part is copied. */
static inline void transform_output_f43_1tile(const float* buffer_ptr, float* out, int p_idx, int idx_blockhw,
                                              int block_h, int block_w, int out_hw, int outw, int resi_h, int resi_w,
                                              int KER_COUT_UNIT_, const float* bias, int activation)
{
    float tmp_buffer[TILE * TILE];
    const float* bias_ptr =
NULL; for (int p = 0; p < KER_COUT_UNIT_; p++) { int cout_idx = p_idx + p; if (bias) { bias_ptr = (bias + cout_idx); } float* out_ptr = out + cout_idx * out_hw; int i_h = idx_blockhw / block_w; int j_w = idx_blockhw % block_w; if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1))) { trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr, activation); } else { int ret_h = TILE - resi_h; if (i_h < block_h - 1) ret_h = TILE; int ret_w = TILE - resi_w; if (j_w < block_w - 1) ret_w = TILE; // tmp_buffer trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr); float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE); for (int hh = 0; hh < ret_h; hh++) { for (int ww = 0; ww < ret_w; ww++) { out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * TILE + ww], activation); } } } buffer_ptr += ELEM_SIZE; } } static inline void transform_output_f43_4tile(float* buffer_ptr, float* out, int p_idx, int block_idx, int block_h, int block_w, int outh, int outw, int resi_h, int resi_w, int KER_COUT_UNIT_, const float* bias, int activation) { int out_hw = outh * outw; float tmp_buffer[TILE * TILE]; int idx_h[4]; int idx_w[4]; idx_h[0] = (block_idx) / block_w; idx_h[1] = (block_idx + 1) / block_w; idx_h[2] = (block_idx + 2) / block_w; idx_h[3] = (block_idx + 3) / block_w; idx_w[0] = (block_idx) % block_w; idx_w[1] = (block_idx + 1) % block_w; idx_w[2] = (block_idx + 2) % block_w; idx_w[3] = (block_idx + 3) % block_w; float* bias_ptr = NULL; for (int p = 0; p < KER_COUT_UNIT_; p++) { int cout_idx = p_idx + p; float* out_ptr = out + cout_idx * out_hw; if (bias) { bias_ptr = ( float* )bias + cout_idx; } for (int ii = 0; ii < 4; ii++) { int i_h = idx_h[ii]; int j_w = idx_w[ii]; if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1))) 
{ trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr, activation); } // direct use_out_ptr else { int ret_h = TILE - resi_h; if (i_h < block_h - 1) ret_h = TILE; int ret_w = TILE - resi_w; if (j_w < block_w - 1) ret_w = TILE; // tmp_buffer trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr); float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE); for (int hh = 0; hh < ret_h; hh++) { for (int ww = 0; ww < ret_w; ww++) { out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation); } } } // end else, tmp_buff buffer_ptr += ELEM_SIZE; } } } // trans_input [block_hw/4][ELEM_SIZE][inc][4] // kernel [out_c/PER_OUT_CHAN][ELEM_SIZE][in_c][PER_OUT_CHAN] static void wino_sgemm_4x16_1(const float* ker, const float* inp, float* output, int cin, int cout_end, int block_h, int block_w, int out_c, int num_thread, int s, int cpu_affinity) { int block_hw = block_h * block_w; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < (cout_end & -PER_OUT_CHAN); p += PER_OUT_CHAN) { float * out_ptr = output + p * ELEM_SIZE * block_hw; float * out_ptr1 ; int i; for (i = 0; i < (block_hw & -4); i += 4) { out_ptr1 = out_ptr + i * ELEM_SIZE * KER_COUT_UNIT; int offset = s * block_hw * cin + i * cin; int offset_ker = s * cin * out_c + p * cin; //#ifdef __aarch64__ wino_sgemm_4x16_A72(out_ptr1 + s * BLOCK_HW_UNIT, inp + offset, ker + offset_ker, cin, 1); } for(; i < block_hw ;i++) { out_ptr1 = out_ptr + i * ELEM_SIZE * KER_COUT_UNIT; int offset_ker = s * cin * out_c + p * cin; int offset = s * block_hw * cin + i * cin; wino_sgemm_1x16(out_ptr1 + s * KER_COUT_UNIT, inp + offset, ker + offset_ker, cin); } } } void wino_sgemm_4x4_1(const float* ker, const float* inp, float* output, int cin, int cout_start, int cout_end, int block_h, int block_w, int out_c, int activation, int s, int num_thread, int cpu_affinity) { int p, i; float* out_ptr; float* out_ptr1; int block_start = 0; int block_hw = block_h * block_w; 
int block_end = block_hw;
/* output channels in groups of KER_COUT_UNIT4 */
for (p = (cout_start & -KER_COUT_UNIT4); p < (cout_end & -KER_COUT_UNIT4); p += KER_COUT_UNIT4)
{
    // NOTE(review): the 16-wide path strides out_ptr by ELEM_SIZE * block_hw;
    // the `cin` factor here and in out_ptr1 below looks inconsistent with the
    // KER_COUT_UNIT4 stride used in the remainder loop -- confirm intended.
    out_ptr = output + p * ELEM_SIZE * cin;
    for(i = (block_start & -4); i < (block_end & -4); i += 4)
    {
        out_ptr1 = out_ptr + i * ELEM_SIZE * cin;
        int offset = s * block_hw * cin + i * cin;
        int offset_ker = s * cin * out_c + p * cin;
        //#ifdef __aarch64__
        wino_sgemm_4x4_A72(out_ptr1 + s * BLOCK_HW_UNIT, inp + offset, ker + offset_ker, cin, 1);
    }
    for(; i < block_end; i++)
    {
        out_ptr1 = out_ptr + i * ELEM_SIZE * KER_COUT_UNIT4;
        int offset_ker = s * cin * out_c + p * cin;
        int offset = s * block_hw * cin + i * cin;
        wino_sgemm_1x4(out_ptr1 + s * KER_COUT_UNIT4, inp + offset, ker + offset_ker, cin);
    }
}
/* leftover single output channels: plain scalar dot products */
for(p = (cout_end & -KER_COUT_UNIT4); p < cout_end; p ++){
    out_ptr = output + p * ELEM_SIZE * block_hw;
    float* ker_ = (float*)(ker + s * cin * out_c + p * cin);
    for(i = (block_start & -4); i < (block_end & -4); i += 4){
        out_ptr1 = out_ptr + i * ELEM_SIZE + s*BLOCK_HW_UNIT;
        float* inp_ = (float*)(inp + s * block_hw * cin + i*cin);
        /* 4 tiles at once; the input is interleaved [cin][4] */
        float sum0 = 0;
        float sum1 = 0;
        float sum2 = 0;
        float sum3 = 0;
        for(int k = 0; k < cin; k++){
            sum0 += inp_[k * 4 ] * ker_[k];
            sum1 += inp_[k * 4 + 1] * ker_[k];
            sum2 += inp_[k * 4 + 2] * ker_[k];
            sum3 += inp_[k * 4 + 3] * ker_[k];
        }
        out_ptr1[0] = sum0;
        out_ptr1[1] = sum1;
        out_ptr1[2] = sum2;
        out_ptr1[3] = sum3;
    }
    for(; i < block_end; i++){
        out_ptr1 = out_ptr + i * ELEM_SIZE + s;
        float* inp_ = (float*)(inp + s * block_hw * cin + i*cin);
        float sum0 = 0;
        for(int k = 0; k < cin; k++){
            sum0 += inp_[k] * ker_[k];
        }
        out_ptr1[0] = sum0;
    }
}
}

/* transform output */
/* Inverse-transforms one group of KER_COUT_UNIT_ output channels starting at
 * channel p from the Winograd domain back into the spatial output map. */
static inline void trans_output_p(float* trans_out_ptr, float* output, float* bias, int bias_term, int block_h,
                                  int block_w, int block_hw, int out_hw, int out_w, int resi_h, int resi_w,
                                  int activation,int p,int KER_COUT_UNIT_)
{
    int flag_outw = 1;
    if(out_w < 16)
        flag_outw = 0;
    int i;
    /* BLOCK_HW_UNIT tiles at a time */
    for(i=0; i< (block_hw & -BLOCK_HW_UNIT); i+=BLOCK_HW_UNIT){
        float* buffer_ptr = trans_out_ptr +
        i * KER_COUT_UNIT_ * ELEM_SIZE;
        /* spatial (row, col) block indices of the 4 consecutive tiles */
        int idx_h[4];
        int idx_w[4];
        idx_h[0] = (i) / block_w;
        idx_h[1] = (i + 1) / block_w;
        idx_h[2] = (i + 2) / block_w;
        idx_h[3] = (i + 3) / block_w;
        idx_w[0] = (i) % block_w;
        idx_w[1] = (i + 1) % block_w;
        idx_w[2] = (i + 2) % block_w;
        idx_w[3] = (i + 3) % block_w;
        /* fast path only if all 4 tiles sit on the same row, away from the border */
        int wino_out_4_tiles = 0;
        if(flag_outw){
            if((idx_h[0] == idx_h[3]) && (idx_h[0] < (block_h - 1)) && (idx_w[3] < (block_w - 1))){
                wino_out_4_tiles = 1;
            }
        }
        if(wino_out_4_tiles == 1){
            float* bias_ptr = NULL;
            for(int pss = 0; pss < KER_COUT_UNIT_; pss++){
                int cout_idx = p + pss;
                float* out_ptr = output + cout_idx * out_hw + idx_h[0] * TILE * out_w + idx_w[0] * TILE;
                if(bias_term){
                    bias_ptr = ( float* )(bias + cout_idx);
                }
                /* inverse-transform coefficients consumed by the assembly kernel */
                float ker00[4] = {2, 4, 8, 0};
                tran_out_4(buffer_ptr + pss * ELEM_SIZE * BLOCK_HW_UNIT, out_ptr, out_w * sizeof(float), ker00, bias_ptr, activation);
            }
        }
        else{
            /* slow path: de-interleave, then emit tile by tile with edge clipping */
            float tmp_buffer[TILE * TILE];
            const float* bias_ptr = NULL;
            for(int pss = 0; pss < KER_COUT_UNIT_; pss++){
                int cout_idx = p + pss;
                float* out_ptr = output + cout_idx * out_hw;
                if(bias_term){
                    bias_ptr = bias + cout_idx;
                }
                float buffer[BLOCK_HW_UNIT * ELEM_SIZE];
                float* buffer_ptr0 = buffer;
                /* transpose [ELEM_SIZE][BLOCK_HW_UNIT] -> [BLOCK_HW_UNIT][ELEM_SIZE] */
                float* mid_ptr = buffer_ptr + pss * BLOCK_HW_UNIT * ELEM_SIZE;
                for(int t = 0; t < BLOCK_HW_UNIT; t++){
                    for(int ss = 0; ss < ELEM_SIZE; ss++){
                        *buffer_ptr0 = mid_ptr[ss * BLOCK_HW_UNIT + t];
                        buffer_ptr0++;
                    }
                }
                for(int ii = 0; ii < BLOCK_HW_UNIT; ii++){
                    int i_h = idx_h[ii];
                    int j_w = idx_w[ii];
                    /* interior tile (or no residue on the touched border): direct store */
                    if((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                       (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1))){
                        trans_output_f43(buffer + ii * ELEM_SIZE, out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w,
                                         ( const float* )bias_ptr, activation);
                    }
                    else{
                        int ret_h = TILE - resi_h;
                        if(i_h < block_h - 1)
                            ret_h = TILE;
                        int ret_w = TILE - resi_w;
                        if(j_w < block_w - 1)
                            ret_w = TILE;
                        trans_output_f43_ordinary(buffer + ii * ELEM_SIZE, tmp_buffer, ( const float* )bias_ptr);
                        float* out_pointer
                        = out_ptr + (i_h * TILE * out_w + j_w * TILE);
                        for(int hh = 0; hh < ret_h; hh++){
                            for(int ww = 0; ww < ret_w; ww++){
                                out_pointer[hh * out_w + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation);
                            }
                        }
                    }
                }
            }
        }
    }
    /* leftover tiles (block_hw not a multiple of BLOCK_HW_UNIT): one at a time */
    for(; i < block_hw; i++){
        float* buffer_ptr = trans_out_ptr + i * KER_COUT_UNIT_ * ELEM_SIZE;
        /* de-interleave [ELEM_SIZE][KER_COUT_UNIT_] -> [KER_COUT_UNIT_][ELEM_SIZE] */
        float resi_buffer[KER_COUT_UNIT_ * ELEM_SIZE];
        float* buffer0 = resi_buffer;
        for(int pp = 0; pp < KER_COUT_UNIT_; pp++){
            for(int ss = 0; ss < ELEM_SIZE; ss++){
                *buffer0 = buffer_ptr[ss * KER_COUT_UNIT_ + pp];
                buffer0++;
            }
        }
        transform_output_f43_1tile(resi_buffer, output, p, i, block_h, block_w, out_hw, out_w, resi_h, resi_w,
                                   KER_COUT_UNIT_, bias, activation);
    }
}

// transform output
/* Drives trans_output_p over all output channels in [cout_start, cout_end):
 * first in groups of KER_COUT_UNIT, then KER_COUT_UNIT4, then singly. */
static inline void trans_output_1(float* trans_out, float* output, float* bias, int bias_term, int block_h,
                                  int block_w, int cout_start, int cout_end, int out_hw, int out_w, int resi_h,
                                  int resi_w, int activation)
{
    int block_hw = block_h * block_w;
    int p;
    //cout 16
    for(p = cout_start; p < (cout_end& -KER_COUT_UNIT); p+=KER_COUT_UNIT){
        trans_output_p(trans_out + p * block_hw * ELEM_SIZE, output, bias, bias_term, block_h, block_w, block_hw,
                       out_hw, out_w, resi_h, resi_w, activation, p, KER_COUT_UNIT);
    }
    //cout 4
    for(p = (cout_end & -KER_COUT_UNIT); p < (cout_end & -KER_COUT_UNIT4); p += KER_COUT_UNIT4){
        trans_output_p(trans_out + p * block_hw * ELEM_SIZE, output, bias, bias_term, block_h, block_w, block_hw,
                       out_hw, out_w, resi_h, resi_w, activation, p, KER_COUT_UNIT4);
    }
    // cout 1
    for(p=(cout_end & -KER_COUT_UNIT4); p < cout_end; p ++){
        trans_output_p(trans_out + p * block_hw * ELEM_SIZE, output, bias, bias_term, block_h, block_w, block_hw,
                       out_hw, out_w, resi_h, resi_w, activation, p, 1);
    }
}

/* Bytes needed for the transformed-kernel scratch buffer (plus slack). */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int output_c = filter->dims[0];
    int input_c = filter->dims[1];
    int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128;    // caution
}

/* Pre-run: transform the convolution kernel into the Winograd domain and
 * interleave it for the GEMM, once per graph. */
int wino_conv_hcl_prerun_1(struct
                           ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                           struct conv_priv_info* priv_info, struct conv_param* param)
{
    // fprintf(stderr,"run into wino_1 prerun.\n");
    int output_c = filter_tensor->dims[0];
    int input_c = filter_tensor->dims[1];
    /* scratch for the transformed (not yet interleaved) kernel */
    int mem_size = get_private_mem_size(filter_tensor, param);
    float* trans_mem = ( float* )sys_malloc(mem_size);

    /* allocate the persistent interleave buffer unless supplied externally */
    if (!priv_info->external_interleave_mem)
    {
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    transform_kernel_f43_tile(filter_tensor, trans_mem);
    interleave_kernel_1(trans_mem, ( float* )priv_info->interleave_buffer, output_c, input_c);

    sys_free(trans_mem);
    return 0;
}

/* Runs one Winograd F(4,3)-style convolution: pad, transform input, GEMM per
 * frequency element, inverse-transform the output. */
int wino_conv_hcl_run_1(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                        struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                        int num_thread, int cpu_affinity)
{
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;

    // pad
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;

    int act_type = param->activation;

    // input
    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;

    // output
    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;

    /* wino param */
    int block_h = (out_h + TILE - 1) / TILE;
    int block_w = (out_w + TILE - 1) / TILE;
    int block_hw = block_h * block_w;
    int padded_in_h = block_h * TILE + 2;
    int padded_in_w = block_w * TILE + 2;
    int padded_in_hw = padded_in_h * padded_in_w;

    /* buffer addr */
    float* input_buf = ( float*
    )input_tensor->data;
    float* output_buf = ( float* )output_tensor->data;
    float* biases_buf = NULL;
    if (bias_tensor != NULL)
        biases_buf = ( float* )bias_tensor->data;
    float* col_buf = ( float* )priv_info->im2col_buffer;
    float* interleave_buf = ( float* )priv_info->interleave_buffer;

    int inp_padded_size = sizeof(float) * (in_c * padded_in_hw + 2);
    int nn_out_c = (out_c / PER_OUT_CHAN) * PER_OUT_CHAN;   /* channels covered by the 16-wide GEMM */
    int nn_block = block_hw >> 2;
    int resi_block = nn_block << 2;                         /* tiles covered by the 4-wide input transform */
    int resi_h = block_h * TILE - out_h;
    int resi_w = block_w * TILE - out_w;

    for (int n = 0; n < batch; n++)
    {
        float* input_padded = ( float* )sys_malloc(inp_padded_size);
        float* trans_inp = ( float* )sys_malloc(sizeof(float) * ELEM_SIZE * in_c * block_hw + 128);
        float* trans_out = ( float* )sys_malloc(sizeof(float) * ELEM_SIZE * out_c * block_hw);
        float* input = input_buf + n * input_size;
        float* output = output_buf + n * output_size;

        /* PAD input */
        pad_input1(input, input_padded, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_h0, pad_w0);

        /* trans input */
        tran_input_4block_1(input_padded, trans_inp, in_c, block_h, block_w, padded_in_h, padded_in_w);
        if (resi_block != block_hw)
        {
            tran_input_resi_block_1(input_padded, trans_inp, in_c, nn_block, resi_block, block_hw, block_w,
                                    padded_in_hw, padded_in_w);
        }
        sys_free(input_padded);

        /* gemm */
        for(int s = 0; s < ELEM_SIZE; s++)
        {
            wino_sgemm_4x16_1(interleave_buf, trans_inp, trans_out, in_c, nn_out_c, block_h, block_w, out_c,
                              num_thread, s, cpu_affinity);
            if (nn_out_c != out_c)
            {
                wino_sgemm_4x4_1(interleave_buf, trans_inp, trans_out, in_c, nn_out_c, out_c, block_h, block_w, out_c,
                                 act_type, s ,num_thread, cpu_affinity);
            }
        }
        sys_free(trans_inp);

        /* NOTE(review): bias_term is hard-coded to 0 here although biases_buf
         * was loaded above, so the bias is never added in this path -- confirm. */
        trans_output_1(trans_out, output, biases_buf, 0, block_h, block_w, 0, out_c, out_hw, out_w, resi_h, resi_w,
                       act_type);
        sys_free(trans_out);
    }
    return 0;
}
#endif
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
/* floating-point ceiling/floor division helpers used by the tiled loop bounds */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: y is modified in place while normalizing the carry. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Perform the carry for the later subtraction by updating y. */
    if (x->tv_usec < y->tv_usec)
    {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * nsec;
        y->tv_sec += nsec;
    }

    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int nsec = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * nsec;
        y->tv_sec -= nsec;
    }

    /* Compute the time remaining to wait.
     * tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Return 1 if result is negative.
     */
    return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates two time planes of a Nz x Ny x Nx grid,
 * runs the tiled stencil sweep TESTS times and reports timings. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* NOTE(review): Nx..Nz and Nt stay uninitialized when fewer than 4 size
     * arguments are supplied -- the benchmark assumes they are all given. */
    if (argc > 3)
    {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* two time planes, accessed as A[t%2][z][y][x] */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 24;
    tile_size[3] = 64;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;
    // initialize variables
    //
    srand(42);
    for (i = 1; i < Nz; i++)
    {
        for (j = 1; j < Ny; j++)
        {
            for (k = 1; k < Nx; k++)
            {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2

        /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
           This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
         */
        /* Pluto/CLooG-generated time-tiled sweep: t1..t8 are tile and point
         * iterators; lbp/ubp bound the OpenMP-parallel tile dimension. */
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,2);t1++) {
    lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
    ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
        for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(24*t3+Nx+20,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) {
          for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),64*t4+62),4*t1-4*t2+Nz+1);t5++) {
            for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                lbv=max(64*t4,t5+1);
                ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 7-point update: new = alpha*center + beta*(sum of 6 neighbors) */
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    /* NOTE: the arrays are intentionally leaked -- the free loop above was
     * disabled to keep it out of the timed region. */
    return 0;
}
Par-07-ParallelOmpForNestedOmpParallelFor.c
/* Par-07: an `omp for` worksharing loop whose body opens a nested
 * `parallel for`.  Different iterations of the outer loop run on different
 * threads, and each outer iteration's (nested) inner loop updates the whole
 * of b[] -- so the `b[j] += a[i]` read-modify-writes from concurrent outer
 * iterations race with each other (no atomic/critical protects b). */
int main(int argc, char **argv)
{
    int a[4] = {1,2,3,4};
    int b[4] = {0, 0, 0, 0};
#pragma omp parallel
    {
#pragma omp for
        for (int i = 0; i < 4; ++i) {
            a[i] = 3*a[i];          /* each i is owned by one thread: a[] is safe */
#pragma omp parallel for
            for (int j = 0; j < 4; ++j) {
                b[j] += a[i];       /* shared b[j] written by multiple outer iterations */
            }
        }
    }
    return 0;
}
GxB_BinaryOp_ztype_name.c
//------------------------------------------------------------------------------ // GxB_BinaryOp_ztype_name: return the type_name of y for z=f(x,y) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GxB_BinaryOp_ztype_name // return the name of the type of x ( char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). const GrB_BinaryOp binaryop ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GxB_BinaryOp_ztype_name (type_name, op)") ; GB_RETURN_IF_NULL (type_name) ; GB_RETURN_IF_NULL_OR_FAULTY (binaryop) ; ASSERT_BINARYOP_OK (binaryop, "binaryop for ztype_name", GB0) ; //-------------------------------------------------------------------------- // get the type_name //-------------------------------------------------------------------------- memcpy (type_name, binaryop->ztype->name, GxB_MAX_NAME_LEN) ; #pragma omp flush return (GrB_SUCCESS) ; }
custom_functions.h
// // Project Name: Kratos // Last Modified by: $Author: G.Casas (gcasas@cimmne.upc.edu) $ // Date: $Date: 2011-6-13 08:56:42 $ // Revision: $Revision: 1.5 $ // // //README::::look to the key word "VERSION" if you want to find all the points where you have to change something so that you can pass from a kdtree to a bin data search structure; #if !defined(KRATOS_CUSTOM_FUNCTIONS) #define KRATOS_CUSTOM_FUNCTIONS // /* External includes */ #ifdef _OPENMP #include <omp.h> #endif // System includes #include <vector> // Project includes #include "includes/model_part.h" #include "utilities/timer.h" #include "utilities/openmp_utils.h" #include "processes/find_elements_neighbours_process.h" #include "processes/find_nodal_neighbours_process.h" //Database includes #include "custom_utilities/search/discrete_particle_configure.h" #include "includes/define.h" #include "../../DEMApplication/custom_elements/discrete_element.h" #include "custom_elements/swimming_particle.h" #include "custom_utilities/AuxiliaryFunctions.h" #include "../../DEMApplication/custom_elements/spheric_particle.h" #include "../swimming_DEM_application.h" #include "../../../kratos/utilities/geometry_utilities.h" namespace Kratos { template <std::size_t TDim> class CustomFunctionsCalculator { public: typedef ModelPart::ElementsContainerType::iterator ElementIterator; typedef ModelPart::NodesContainerType::iterator NodeIterator; typedef ModelPart::NodesContainerType NodesArrayType; KRATOS_CLASS_POINTER_DEFINITION(CustomFunctionsCalculator); CustomFunctionsCalculator(): mPressuresFilled(false), mFirstGradientRecovery(true), mFirstLaplacianRecovery(true), mSomeCloudsDontWork(false), mCalculatingTheGradient(false), mCalculatingTheLaplacian(false), mFirstTimeAppending(true){} /// Calculator virtual ~CustomFunctionsCalculator(){} /// Default calculator //************************************************************************************************************************************************** 
//************************************************************************************************************************************************** void CalculatePressureGradient(ModelPart& r_model_part) { for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){ noalias(inode->FastGetSolutionStepValue(PRESSURE_GRADIENT)) = ZeroVector(3); } array_1d <double, 3> grad = ZeroVector(3); // its dimension is always 3 array_1d <double, TDim + 1 > elemental_pressures; array_1d <double, TDim + 1 > N; // shape functions vector BoundedMatrix<double, TDim + 1, TDim> DN_DX; for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){ // computing the shape function derivatives Geometry<Node<3> >& geom = ielem->GetGeometry(); double Volume; GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); // getting the pressure gradients; for (unsigned int i = 0; i < TDim + 1; ++i){ elemental_pressures[i] = geom[i].FastGetSolutionStepValue(PRESSURE); } array_1d <double, TDim> grad_aux = prod(trans(DN_DX), elemental_pressures); // its dimension may be 2 for (unsigned int i = 0; i < TDim; ++i){ grad[i] = grad_aux[i]; } double nodal_area = Volume / static_cast<double>(TDim + 1); grad *= nodal_area; for (unsigned int i = 0; i < TDim + 1; ++i){ geom[i].FastGetSolutionStepValue(PRESSURE_GRADIENT) += grad; } } for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){ inode->FastGetSolutionStepValue(PRESSURE_GRADIENT) /= inode->FastGetSolutionStepValue(NODAL_AREA); } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** // This function assesses the stationarity based on the pressure field variation. 
// Its tolerance applies to the non-dimensional pressure variation between consecutive // measurements. bool AssessStationarity(ModelPart& r_model_part, const double& tol) { if (!mPressuresFilled){ PerformFirstStepComputations(r_model_part); return(false); } else { double max_pressure_change_rate = 0.0; // measure of stationarity double mean_celerity = 0.0; // used to adimensionalize the time step // filling up mPressures and calculating the mean velocities and the maximum nodal pressure change unsigned int i = 0; for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){ const array_1d<double, 3>& velocity = inode->FastGetSolutionStepValue(VELOCITY); mean_celerity += SWIMMING_MODULUS_3(velocity); const double new_pressure = inode->FastGetSolutionStepValue(PRESSURE); double& old_pressure = mPressures[i]; const double delta_p = std::abs(new_pressure - old_pressure); max_pressure_change_rate = std::max(delta_p, max_pressure_change_rate); old_pressure = new_pressure; ++i; } mean_celerity /= i; const double delta_t = r_model_part.GetProcessInfo()[TIME] - mLastMeasurementTime; if (delta_t > 0.0){ max_pressure_change_rate /= delta_t; // calculating coefficients for adimensionalization of the pressure change rate const double characteristic_length = std::pow(mTotalDomainVolume, 1.0 / 3); // characteristic length of the model. Should be improved: a hydraulic radius or such const double reciprocal_of_characteristic_time = mean_celerity / characteristic_length; const double pressure_spatial_variation = GetRangeWithinVector(mPressures); mLastPressureVariation = pressure_spatial_variation; const double characteristic_pressure_variation = 0.5 * (pressure_spatial_variation + mLastPressureVariation); if (characteristic_pressure_variation == 0.0 || reciprocal_of_characteristic_time == 0.0){ // unlikely std::cout << "Uniform problem: stationarity check being performed with dimensional values...! 
" << "\n"; if (max_pressure_change_rate <= tol){ // go with the absolute value return true; } } max_pressure_change_rate /= reciprocal_of_characteristic_time * characteristic_pressure_variation ; } else { KRATOS_THROW_ERROR(std::runtime_error, "Trying to calculate pressure variations between two coincident time steps! (null time variation since last recorded time)",""); } std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n"; std::cout << "The stationarity condition tolerance is " << "\n"; KRATOS_WATCH(tol) std::cout << "The stationarity residual is now " << "\n"; KRATOS_WATCH(max_pressure_change_rate) std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n"; return max_pressure_change_rate <= tol; } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** double CalculateDomainVolume(ModelPart& r_fluid_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double added_volume = 0.0; #pragma omp parallel for reduction(+ : added_volume) for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){ for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){ added_volume += CalculateElementalVolume(it->GetGeometry()); } } return added_volume; } //************************************************************************************************************************************************** //************************************************************************************************************************************************** // this function assumes linear elements are used void 
CalculateTotalHydrodynamicForceOnParticles(ModelPart& r_dem_model_part, array_1d <double, 3>& force)
{
    OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_dem_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    std::vector<array_1d <double, 3> > added_force_vect;
    added_force_vect.resize(OpenMPUtils::GetNumThreads());

    // one accumulator per thread to avoid a shared-write race
    for (unsigned int k = 0; k < added_force_vect.size(); ++k){
        added_force_vect[k] = ZeroVector(3);
    }

    #pragma omp parallel for
    for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){

        for (ElementIterator it = GetElementPartitionBegin(r_dem_model_part, k); it != GetElementPartitionEnd(r_dem_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            array_1d <double, 3> element_force;

            if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_FORCE)){
                element_force = geom[0].FastGetSolutionStepValue(HYDRODYNAMIC_FORCE);
            }

            else {
                element_force = ZeroVector(3);
            }

            added_force_vect[k] += element_force;
        }
    }

    // reduce the per-thread partial sums
    force = added_force_vect[0];

    for (unsigned int k = 1; k < added_force_vect.size(); ++k){
        force += added_force_vect[k];
    }
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used

// Integrates the instantaneous and time-averaged hydrodynamic reaction per
// unit fluid mass over all fluid elements, in parallel.
void CalculateTotalHydrodynamicForceOnFluid(ModelPart& r_fluid_model_part, array_1d <double, 3>& instantaneous_force, array_1d <double, 3>& mean_force)
{
    OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    std::vector<array_1d <double, 3> > added_force_vect;
    added_force_vect.resize(OpenMPUtils::GetNumThreads());
    std::vector<array_1d <double, 3> > added_mean_force_vect;
    added_mean_force_vect.resize(OpenMPUtils::GetNumThreads());

    // zero both per-thread accumulators
    for (unsigned int k = 0; k <
added_force_vect.size(); ++k){ added_force_vect[k] = ZeroVector(3); added_mean_force_vect[k] = ZeroVector(3); } #pragma omp parallel for for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){ for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){ Geometry< Node<3> >& geom = it->GetGeometry(); double element_volume; array_1d <double, 3> element_force; array_1d <double, 3> element_mean_force; if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){ element_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, HYDRODYNAMIC_REACTION, element_volume); } else { element_force = ZeroVector(3); } if (geom[0].SolutionStepsDataHas(MEAN_HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){ element_mean_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, MEAN_HYDRODYNAMIC_REACTION, element_volume); } else { element_mean_force = ZeroVector(3); } added_force_vect[k] += element_force; added_mean_force_vect[k] += element_mean_force; } } instantaneous_force = added_force_vect[0]; mean_force = added_force_vect[0]; for (unsigned int k = 1; k < added_force_vect.size(); ++k){ instantaneous_force += added_force_vect[k]; mean_force += added_mean_force_vect[k]; } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** // this function assumes linear elements are used double CalculateGlobalFluidVolume(ModelPart& r_fluid_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double added_fluid_volume = 0.0; #pragma omp parallel for reduction(+ : 
added_fluid_volume)
for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){

    for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
        Geometry< Node<3> >& geom = it->GetGeometry();
        double element_volume;
        double element_fluid_volume;

        if (geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
            element_fluid_volume = CalculateScalarIntegralOfLinearInterpolation(geom, FLUID_FRACTION, element_volume);
        }

        else {
            element_fluid_volume = CalculateElementalVolume(geom);
        }

        added_fluid_volume += element_fluid_volume;
    }
}

return added_fluid_volume;
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************

// Determinant via LU factorization with partial pivoting: product of the
// diagonal of U, sign-flipped once per row swap; returns 0.0 when singular.
template<class matrix_T>
double determinant(boost::numeric::ublas::matrix_expression<matrix_T> const& mat_r)
{
    double det = 1.0;

    matrix_T mLu(mat_r() );
    boost::numeric::ublas::permutation_matrix<std::size_t> pivots(mat_r().size1() );

    int is_singular = lu_factorize(mLu, pivots);

    if (!is_singular)
    {
        for (std::size_t i=0; i < pivots.size(); ++i)
        {
            if (pivots(i) != i)
                det *= -1.0;

            det *= mLu(i,i);
        }
    }

    else
        det = 0.0;

    return det;
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************

// Inverse of a square matrix: closed-form expressions for 1x1/2x2/3x3 and
// blockwise inversion for larger sizes (definition continues past this chunk).
const DenseMatrix<double> Inverse(
    const DenseMatrix<double>& m)
{
    assert(m.size1() == m.size2() && "Can only calculate the inverse of square matrices");

    switch(m.size1())
    {
        case 1:
        {
            assert(m.size1() == 1 && m.size2() == 1 && "Only for 1x1 matrices");
            const double determinant = CalcDeterminant(m);
            assert(determinant != 0.0);
            assert(m(0,0) != 0.0 && "Cannot
take the inverse of matrix [0]"); DenseMatrix<double> n(1,1); n(0,0) = 1.0 / determinant; return n; } case 2: { assert(m.size1() == 2 && m.size2() == 2 && "Only for 2x2 matrices"); const double determinant = CalcDeterminant(m); assert(determinant != 0.0); const double a = m(0,0); const double b = m(0,1); const double c = m(1,0); const double d = m(1,1); DenseMatrix<double> n(2,2); n(0,0) = d / determinant; n(0,1) = -b / determinant; n(1,0) = -c / determinant; n(1,1) = a / determinant; return n; } case 3: { assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices"); const double determinant = CalcDeterminant(m); assert(determinant != 0.0); const double a = m(0,0); const double b = m(0,1); const double c = m(0,2); const double d = m(1,0); const double e = m(1,1); const double f = m(1,2); const double g = m(2,0); const double h = m(2,1); const double k = m(2,2); DenseMatrix<double> n(3,3); const double new_a = ((e*k)-(f*h)) / determinant; const double new_b = -((d*k)-(f*g)) / determinant; const double new_c = ((d*h)-(e*g)) / determinant; const double new_d = -((b*k)-(c*h)) / determinant; const double new_e = ((a*k)-(c*g)) / determinant; const double new_f = -((a*h)-(b*g)) / determinant; const double new_g = ((b*f)-(c*e)) / determinant; const double new_h = -((a*f)-(c*d)) / determinant; const double new_k = ((a*e)-(b*d)) / determinant; n(0,0) = new_a; n(1,0) = new_b; n(2,0) = new_c; n(0,1) = new_d; n(1,1) = new_e; n(2,1) = new_f; n(0,2) = new_g; n(1,2) = new_h; n(2,2) = new_k; return n; } default: { //Use blockwise inversion //Matrix::Chop returns a std::vector //[ A at [0] B at [1] ] //[ C at [2] D at [4] ] const std::vector<DenseMatrix<double> > v = Chop(m); const DenseMatrix<double>& a = v[0]; assert(a.size1() == a.size2()); const DenseMatrix<double> a_inv = Inverse(a); const DenseMatrix<double>& b = v[1]; const DenseMatrix<double>& c = v[2]; const DenseMatrix<double>& d = v[3]; const DenseMatrix<double> term = d - prod( DenseMatrix<double>(prod(c,a_inv)), 
b ); const DenseMatrix<double> term_inv = Inverse(term); const DenseMatrix<double> new_a = a_inv + DenseMatrix<double>(prod( DenseMatrix<double>(prod( DenseMatrix<double>(prod( DenseMatrix<double>(prod( a_inv, b)), term_inv)), c)), a_inv)); const DenseMatrix<double> new_b = - DenseMatrix<double>(prod( DenseMatrix<double>(prod( a_inv, b)), term_inv)); const DenseMatrix<double> new_c = - DenseMatrix<double>(prod( DenseMatrix<double>(prod( term_inv, c)), a_inv)); const DenseMatrix<double> new_d = term_inv; std::vector<DenseMatrix<double> > w; w.push_back(new_a); w.push_back(new_b); w.push_back(new_c); w.push_back(new_d); const DenseMatrix<double> result = Unchop(w); return result; } } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<double>& origin_variable, const Variable<double>& destination_variable) { #pragma omp parallel for for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){ ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i; Node<3>::Pointer p_node = *(i_particle.base()); double& destination_value = p_node->FastGetSolutionStepValue(destination_variable); const double& origin_value = p_node->FastGetSolutionStepValue(origin_variable); destination_value = origin_value; } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<array_1d<double, 3>>& origin_variable, const Variable<array_1d<double, 
3>>& destination_variable) { #pragma omp parallel for for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){ ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i; Node<3>::Pointer p_node = *(i_particle.base()); array_1d<double, 3>& destination_value = p_node->FastGetSolutionStepValue(destination_variable); const array_1d<double, 3>& origin_value = p_node->FastGetSolutionStepValue(origin_variable); noalias(destination_value) = origin_value; } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** void SetValueOfAllNotes(ModelPart& r_model_part, const double& value, const Variable<double>& destination_variable) { #pragma omp parallel for for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){ ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i; Node<3>::Pointer p_node = *(i_particle.base()); double& destination_value = p_node->FastGetSolutionStepValue(destination_variable); destination_value = value; } } //************************************************************************************************************************************************** //************************************************************************************************************************************************** void SetValueOfAllNotes(ModelPart& r_model_part, const array_1d<double, 3>& value, const Variable<array_1d<double, 3>>& destination_variable) { #pragma omp parallel for for (int i = 0; i < (int)r_model_part.Nodes().size(); ++i){ ModelPart::NodesContainerType::iterator i_particle = r_model_part.NodesBegin() + i; Node<3>::Pointer p_node = *(i_particle.base()); array_1d<double, 3>& destination_value = p_node->FastGetSolutionStepValue(destination_variable); 
noalias(destination_value) = value;
    }
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************

private:

// State flags and cached measurements of the calculator.
bool mPressuresFilled;
bool mFirstGradientRecovery;
bool mFirstLaplacianRecovery;
bool mSomeCloudsDontWork;
bool mCalculatingTheGradient;
bool mCalculatingTheLaplacian;
bool mFirstTimeAppending;
double mLastMeasurementTime;
double mLastPressureVariation;
double mTotalDomainVolume;
std::vector<double> mPressures;                  // nodal PRESSURE snapshot taken in PerformFirstStepComputations
std::vector<DenseVector<double> > mFirstRowsOfB;

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Area of the triangle (x0,y0)-(x1,y1)-(x2,y2): half the absolute value of
// the cross product of two edge vectors.
inline double CalculateArea(const double x0, const double y0,
                            const double x1, const double y1,
                            const double x2, const double y2)
{
    const double x10 = x1 - x0;
    const double y10 = y1 - y0;

    const double x20 = x2 - x0;
    const double y20 = y2 - y0;

    const double area = 0.5 * std::abs(x10 * y20 - x20 * y10);

    return area;
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Volume of the tetrahedron with the four given vertices: detJ / 6, where
// detJ is the determinant of the edge-vector Jacobian. NOTE(review): the
// result is signed — its sign depends on the node ordering.
inline double CalculateVol(const double x0, const double y0, const double z0,
                           const double x1, const double y1, const double z1,
                           const double x2, const double y2, const double z2,
                           const double x3, const double y3, const double z3)
{
    double x10 = x1 - x0;
    double y10 = y1 - y0;
    double z10 = z1 - z0;

    double x20 = x2 - x0;
    double y20 = y2 - y0;
    double z20 = z2 - z0;

    double x30 = x3 - x0;
    double y30 = y3 - y0;
    double z30 = z3 - z0;

    double detJ = x10 * y20 * z30 - x10 * y30 * z20
                + y10 * z20 * x30 - y10 * x20 * z30
                + z10 * x20 * y30 - z10 * y20 * x30;

    return detJ * 0.1666666666666666666666667; // detJ / 6
}

//***************************************************************************************************************
//***************************************************************************************************************
// Geometric measure of a linear element: triangle area in 2D, tetrahedron
// volume in 3D. Throws if the measure is exactly zero.
double CalculateElementalVolume(const Geometry<Node <3> >& geom)
{
    double vol;

    if (TDim == 2){
        double x0 = geom[0].X();
        double y0 = geom[0].Y();
        double x1 = geom[1].X();
        double y1 = geom[1].Y();
        double x2 = geom[2].X();
        double y2 = geom[2].Y();

        vol = CalculateArea(x0, y0, x1, y1, x2, y2);
    }

    else {
        double x0 = geom[0].X();
        double y0 = geom[0].Y();
        double z0 = geom[0].Z();
        double x1 = geom[1].X();
        double y1 = geom[1].Y();
        double z1 = geom[1].Z();
        double x2 = geom[2].X();
        double y2 = geom[2].Y();
        double z2 = geom[2].Z();
        double x3 = geom[3].X();
        double y3 = geom[3].Y();
        double z3 = geom[3].Z();

        vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    }

    if (vol == 0.0){
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found with the current geometry ", geom);
    }

    return vol;
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Integral of the linear interpolation of the scalar nodal variable r_var
// over a (3D, 4-node) element. N[i] holds the volume of the sub-tetrahedron
// formed by the centroid and the opposite face, so sum_i N[i] * value_i is
// the integral of the interpolant. The element volume is returned in vol.
double CalculateScalarIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<double>& r_var, double& vol)
{
    array_1d<double, 4> N;

    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    // element centroid
    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    if (vol == 0.0){
        KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
    }

    // sub-tetrahedron volumes associated with each node
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    double value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var, 0);
    }

    return value_at_gauss_point;
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Same as CalculateScalarIntegralOfLinearInterpolation, but for a
// 3-component vector nodal variable.
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    array_1d<double, 4> N;

    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    if (vol == 0.0){
        KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
    }

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var);
    }

    return value_at_gauss_point;
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Integral of the linear interpolation of r_var over the element, with each
// nodal value additionally weighted by the nodal DENSITY and FLUID_FRACTION.
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    array_1d<double, 4> N;

    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    if (vol == 0.0){
        KRATOS_THROW_ERROR(std::logic_error, "Element with zero area found. Its geometry is given by", geom);
    }

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var) * geom[0].FastGetSolutionStepValue(DENSITY) * geom[0].FastGetSolutionStepValue(FLUID_FRACTION);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var) * geom[i].FastGetSolutionStepValue(DENSITY) * geom[i].FastGetSolutionStepValue(FLUID_FRACTION);
    }

    return value_at_gauss_point;
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// First-call initialization: caches the domain volume, snapshots every nodal
// PRESSURE into mPressures and records the current pressure range.
void PerformFirstStepComputations(ModelPart& r_model_part)
{
    mTotalDomainVolume = CalculateDomainVolume(r_model_part);
    mPressures.resize(r_model_part.Nodes().size());
    mLastMeasurementTime = r_model_part.GetProcessInfo()[TIME];

    unsigned int i = 0;

    for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode) {
        mPressures[i] = inode->FastGetSolutionStepValue(PRESSURE);
        ++i;
    }

    mPressuresFilled = true;
    mLastPressureVariation = GetRangeWithinVector(mPressures);
}

//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Strict weak ordering on (index, distance) pairs: by distance, with the
// index as tie-breaker.
struct IsCloser{
    bool operator()(std::pair<unsigned int, double> const& first_pair, std::pair<unsigned int, double> const& second_pair)
    {
        return(first_pair.second <
second_pair.second || (first_pair.second == second_pair.second && first_pair.first < second_pair.first)); } }; //************************************************************************************************************************************************** //************************************************************************************************************************************************** inline int Factorial(const unsigned int n){ if (n == 0){ return 1; } unsigned int k = n; for (unsigned int i = n - 1; i > 0; --i){ k *= i; } return k; } //************************************************************************************************************************************************** //************************************************************************************************************************************************** double CalculateTheMaximumEdgeLength(ModelPart& r_model_part) { double max_distance_yet = 0.0; for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){ Geometry<Node<3> >& geom = ielem->GetGeometry(); unsigned int n_nodes = static_cast<unsigned int>(TDim + 1); for (unsigned int k = 1; k < n_nodes - 1; ++k){ for (unsigned int i = k; i < n_nodes; ++i){ array_1d <double, 3> delta_i = geom[k - 1] - geom[i]; double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i); max_distance_yet = max_distance_yet > distance_2 ? 
max_distance_yet : distance_2; } } } return(std::sqrt(max_distance_yet)); } //************************************************************************************************************************************************** //************************************************************************************************************************************************** double CalculateTheMinumumEdgeLength(ModelPart& r_model_part) { double min_distance_yet = 0.0; bool first_node = true; for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){ Geometry<Node<3> >& geom = ielem->GetGeometry(); if (first_node){ // assign the distance (squared) between any two nodes to min_distance_yet array_1d <double, 3> delta = geom[0] - geom[1]; double distance_2 = DEM_INNER_PRODUCT_3(delta, delta); min_distance_yet = distance_2; } unsigned int n_nodes = static_cast<unsigned int>(TDim + 1); for (unsigned int k = 1; k < n_nodes - 1; ++k){ for (unsigned int i = k; i < n_nodes; ++i){ array_1d <double, 3> delta_i = geom[k - 1] - geom[i]; double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i); min_distance_yet = min_distance_yet < distance_2 ? 
min_distance_yet : distance_2; } } } return(std::sqrt(min_distance_yet)); } //************************************************************************************************************************************************** //************************************************************************************************************************************************** // The following block of functions is used to calculate explicit matrix inverses and was taken from // Richel BilderBeek's website (http://www.richelbilderbeek.nl/CppUblasMatrixExample6.htm), and it is // transcribed here with a very minor modification double CalcDeterminant(const DenseMatrix<double>& m) { assert(m.size1() == m.size2() && "Can only calculate the determinant of square matrices"); switch(m.size1()) { case 1: { return m(0,0); } case 2: { const double a = m(0,0); const double b = m(0,1); const double c = m(1,0); const double d = m(1,1); const double determinant = (a * d) - (b * c); return determinant; } case 3: { assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices"); const double a = m(0,0); const double b = m(0,1); const double c = m(0,2); const double d = m(1,0); const double e = m(1,1); const double f = m(1,2); const double g = m(2,0); const double h = m(2,1); const double k = m(2,2); const double determinant = (a * ((e*k) - (f*h))) - (b * ((k*d) - (f*g))) + (c * ((d*h) - (e*g))); return determinant; } default: assert(!"Should not get here: unsupported matrix size"); throw std::runtime_error("Unsupported matrix size"); } } ///Chop returns a std::vector of sub-matrices //[ A at [0] B at [1] ] //[ C at [2] D at [4] ] const std::vector<DenseMatrix<double> > Chop( const DenseMatrix<double>& m) { using boost::numeric::ublas::range; using boost::numeric::ublas::matrix_range; std::vector<matrix<double> > v; v.reserve(4); const int midy = m.size1() / 2; const int midx = m.size2() / 2; const matrix_range<const matrix<double> > top_left( m,range(0 ,midy ),range(0 ,midx )); 
const matrix_range<const matrix<double> > bottom_left( m,range(midy,m.size1()),range(0 ,midx )); const matrix_range<const matrix<double> > top_right( m,range(0 ,midy ),range(midx,m.size2())); const matrix_range<const matrix<double> > bottom_right(m,range(midy,m.size1()),range(midx,m.size2())); v.push_back(matrix<double>(top_left)); v.push_back(matrix<double>(top_right)); v.push_back(matrix<double>(bottom_left)); v.push_back(matrix<double>(bottom_right)); return v; } ///Unchop merges the 4 std::vector of sub-matrices produced by Chop const DenseMatrix<double> Unchop( const std::vector<DenseMatrix<double> >& v) { //Chop returns a std::vector of sub-matrices //[ A at [0] B at [1] ] //[ C at [2] D at [4] ] using boost::numeric::ublas::range; using boost::numeric::ublas::matrix_range; assert(v.size() == 4); assert(v[0].size1() == v[1].size1()); assert(v[2].size1() == v[3].size1()); assert(v[0].size2() == v[2].size2()); assert(v[1].size2() == v[3].size2()); DenseMatrix<double> m(v[0].size1() + v[2].size1(),v[0].size2() + v[1].size2()); for (int quadrant=0; quadrant!=4; ++quadrant) { const DenseMatrix<double>& w = v[quadrant]; const std::size_t n_rows = v[quadrant].size1(); const std::size_t n_cols = v[quadrant].size2(); const int offset_x = quadrant % 2 ? v[0].size2() : 0; const int offset_y = quadrant / 2 ? 
v[0].size1() : 0; for (std::size_t row=0; row!=n_rows; ++row) { for (std::size_t col=0; col!=n_cols; ++col) { m(offset_y + row, offset_x + col) = w(row,col); } } } assert(v[0].size1() + v[2].size1() == m.size1()); assert(v[1].size1() + v[3].size1() == m.size1()); assert(v[0].size2() + v[1].size2() == m.size2()); assert(v[2].size2() + v[3].size2() == m.size2()); return m; } //************************************************************************************************************************************************** //************************************************************************************************************************************************** ///@} ///@name Member r_variables ///@{ DenseVector<unsigned int> mElementsPartition; ///@} ///@name Un accessible methods ///@{ double GetRangeWithinVector(const std::vector<double>& vector) { double min = vector[0]; double max = vector[0]; for (unsigned int i = 0; i != vector.size(); ++i){ min = std::min(min, mPressures[i]); max = std::max(max, mPressures[i]); } return (max - min); } DenseVector<unsigned int>& GetElementPartition() { return mElementsPartition; } ElementIterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k) { return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k]; } ElementIterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k) { return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k + 1]; } //************************************************************************************************************************************************** //************************************************************************************************************************************************** }; // Class CustomFunctionsCalculator } // namespace Kratos. #endif // KRATOS_CREATE_AND_DESTROY defined
imag_self_energy_with_g.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include "imag_self_energy_with_g.h"

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#include "lagrid.h"
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "triplet.h"

/* File-local helpers. */
static long ise_set_g_pos_frequency_point(long (*g_pos)[4],
                                          const long num_band0,
                                          const long num_band,
                                          const char *g_zero);
static void detailed_imag_self_energy_at_triplet(
    double *detailed_imag_self_energy, double *imag_self_energy,
    const long num_band0, const long num_band,
    const double *fc3_normal_squared, const double *frequencies,
    const long triplet[3], const double *g1, const double *g2_3,
    const char *g_zero, const double *temperatures, const long num_temps,
    const double cutoff_frequency);
static double collect_detailed_imag_self_energy(
    double *imag_self_energy, const long num_band,
    const double *fc3_normal_squared, const double *n1, const double *n2,
    const double *g1, const double *g2_3, const char *g_zero);
static double collect_detailed_imag_self_energy_0K(
    double *imag_self_energy, const long num_band,
    const double *fc3_normal_squared, const double *n1, const double *n2,
    const double *g, const char *g_zero);
static void set_occupations(double *n1, double *n2, const long num_band,
                            const double temperature, const long triplet[3],
                            const double *frequencies,
                            const double cutoff_frequency);

/* Imaginary part of the self energy, summed over all triplets.           */
/* frequency_point_index < 0 selects the frequencies-at-bands mode;       */
/* otherwise the calculation is done at that arbitrary frequency point.   */
/* imag_self_energy has num_band0 entries on output.                      */
void ise_get_imag_self_energy_at_bands_with_g(
    double *imag_self_energy, const Darray *fc3_normal_squared,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const double *g, const char *g_zero,
    const double temperature, const double cutoff_frequency,
    const long num_frequency_points, const long frequency_point_index) {
    long i, j, num_triplets, num_band0, num_band, num_band_prod;
    long num_g_pos, g_index_dims, g_index_shift;
    long(*g_pos)[4];
    double *ise;
    long at_a_frequency_point;

    g_pos = NULL;
    ise = NULL;

    num_triplets = fc3_normal_squared->dims[0];
    num_band0 = fc3_normal_squared->dims[1];
    num_band = fc3_normal_squared->dims[2];
    num_band_prod = num_band0 * num_band * num_band;
    /* per-triplet partial results, reduced serially below */
    ise = (double *)malloc(sizeof(double) * num_triplets * num_band0);

    if (frequency_point_index < 0) {
        /* frequency_points == frequencies at bands */
        at_a_frequency_point = 0;
        g_index_dims = num_band_prod;
        g_index_shift = 0;
    } else {
        /* At an arbitrary frequency point. */
        at_a_frequency_point = 1;
        g_index_dims = num_frequency_points * num_band * num_band;
        g_index_shift = frequency_point_index * num_band * num_band;
    }

#ifdef _OPENMP
#pragma omp parallel for private(num_g_pos, j, g_pos)
#endif
    for (i = 0; i < num_triplets; i++) {
        /* per-thread scratch list of non-zero g positions */
        g_pos = (long(*)[4])malloc(sizeof(long[4]) * num_band_prod);
        /* ise_set_g_pos only works for the case of frequency points at */
        /* bands. For frequency sampling mode, g_zero is assumed all */
        /* with the array shape of (num_triplets, num_band0, num_band, */
        /* num_band). */
        if (at_a_frequency_point) {
            num_g_pos = ise_set_g_pos_frequency_point(
                g_pos, num_band0, num_band,
                g_zero + i * g_index_dims + g_index_shift);
        } else {
            num_g_pos = ise_set_g_pos(g_pos, num_band0, num_band,
                                      g_zero + i * num_band_prod);
        }

        ise_imag_self_energy_at_triplet(
            ise + i * num_band0, num_band0, num_band,
            fc3_normal_squared->data + i * num_band_prod, frequencies,
            triplets[i], triplet_weights[i],
            g + i * g_index_dims + g_index_shift,
            g + (i + num_triplets) * g_index_dims + g_index_shift, g_pos,
            num_g_pos, &temperature, 1, cutoff_frequency, 0,
            at_a_frequency_point);

        free(g_pos);
        g_pos = NULL;
    }

    /* serial reduction over triplets */
    for (i = 0; i < num_band0; i++) {
        imag_self_energy[i] = 0;
    }

    for (i = 0; i < num_triplets; i++) {
        for (j = 0; j < num_band0; j++) {
            imag_self_energy[j] += ise[i * num_band0 + j];
        }
    }

    free(ise);
    ise = NULL;
}

/* Same quantity as above, but also resolved per (triplet, band, band,     */
/* band) in detailed_imag_self_energy and split into normal (N) and        */
/* Umklapp (U) contributions per band.                                     */
void ise_get_detailed_imag_self_energy_at_bands_with_g(
    double *detailed_imag_self_energy, double *imag_self_energy_N,
    double *imag_self_energy_U, const Darray *fc3_normal_squared,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const long (*bz_grid_addresses)[3],
    const double *g, const char *g_zero, const double
temperature, const double cutoff_frequency) {
    double *ise;
    long i, j, num_triplets, num_band0, num_band, num_band_prod;
    long *is_N;
    double ise_tmp, N, U;

    ise = NULL;
    is_N = NULL;

    num_triplets = fc3_normal_squared->dims[0];
    num_band0 = fc3_normal_squared->dims[1];
    num_band = fc3_normal_squared->dims[2];
    num_band_prod = num_band0 * num_band * num_band;
    ise = (double *)malloc(sizeof(double) * num_triplets * num_band0);

    /* detailed_imag_self_energy has the same shape as fc3_normal_squared. */

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < num_triplets; i++) {
        detailed_imag_self_energy_at_triplet(
            detailed_imag_self_energy + i * num_band_prod,
            ise + i * num_band0, num_band0, num_band,
            fc3_normal_squared->data + i * num_band_prod, frequencies,
            triplets[i], g + i * num_band_prod,
            g + (i + num_triplets) * num_band_prod,
            g_zero + i * num_band_prod, &temperature, 1, cutoff_frequency);
    }

    /* Classify each triplet as normal (N) or Umklapp process. */
    is_N = (long *)malloc(sizeof(long) * num_triplets);

    for (i = 0; i < num_triplets; i++) {
        is_N[i] = tpl_is_N(triplets[i], bz_grid_addresses);
    }

    for (i = 0; i < num_band0; i++) {
        N = 0;
        U = 0;
        /* #ifdef _OPENMP */
        /* #pragma omp parallel for private(ise_tmp) reduction(+:N,U) */
        /* #endif */
        for (j = 0; j < num_triplets; j++) {
            ise_tmp = ise[j * num_band0 + i] * triplet_weights[j];
            if (is_N[j]) {
                N += ise_tmp;
            } else {
                U += ise_tmp;
            }
        }
        imag_self_energy_N[i] = N;
        imag_self_energy_U[i] = U;
    }

    free(is_N);
    is_N = NULL;
    free(ise);
    ise = NULL;
}

/* Contribution of a single triplet to the imaginary self energy, for each */
/* of num_temps temperatures. Only the (band0, band, band) positions       */
/* listed in g_pos (where g is non-zero) are visited.                      */
void ise_imag_self_energy_at_triplet(
    double *imag_self_energy, const long num_band0, const long num_band,
    const double *fc3_normal_squared, const double *frequencies,
    const long triplet[3], const long triplet_weight, const double *g1,
    const double *g2_3, const long (*g_pos)[4], const long num_g_pos,
    const double *temperatures, const long num_temps,
    const double cutoff_frequency, const long openmp_at_bands,
    const long at_a_frequency_point) {
    long i, j;
    double *n1, *n2;
    long g_pos_3;

    /* Bose-Einstein occupations of the second and third phonon of the */
    /* triplet, one row per temperature (-1 marks bands below the */
    /* cutoff frequency). */
    n1 = (double *)malloc(sizeof(double) * num_temps * num_band);
    n2 = (double *)malloc(sizeof(double) * num_temps * num_band);
    for (i = 0; i < num_temps; i++) {
        set_occupations(n1 + i * num_band, n2 + i * num_band, num_band,
                        temperatures[i], triplet, frequencies,
                        cutoff_frequency);
    }

    for (i = 0; i < num_band0 * num_temps; i++) {
        imag_self_energy[i] = 0;
    }

    /* Do not use OpenMP here!! */
    /* g_pos[i][0] takes value 0 <= x < num_band0 only, */
    /* which causes race condition. */
    for (i = 0; i < num_g_pos; i++) {
        if (at_a_frequency_point) {
            /* At an arbitrary frequency point: g has no band0 axis, so */
            /* fold the flat index back into the (band, band) block. */
            g_pos_3 = g_pos[i][3] % (num_band * num_band);
        } else {
            /* frequency_points == frequencies at bands */
            g_pos_3 = g_pos[i][3];
        }
        for (j = 0; j < num_temps; j++) {
            if (n1[j * num_band + g_pos[i][1]] < 0 ||
                n2[j * num_band + g_pos[i][2]] < 0) {
                ; /* either band is below the cutoff frequency: skip */
            } else {
                if (temperatures[j] > 0) {
                    imag_self_energy[j * num_band0 + g_pos[i][0]] +=
                        ((n1[j * num_band + g_pos[i][1]] +
                          n2[j * num_band + g_pos[i][2]] + 1) *
                             g1[g_pos_3] +
                         (n1[j * num_band + g_pos[i][1]] -
                          n2[j * num_band + g_pos[i][2]]) *
                             g2_3[g_pos_3]) *
                        fc3_normal_squared[g_pos[i][3]] * triplet_weight;
                } else {
                    /* T = 0: only the spontaneous-emission term remains. */
                    imag_self_energy[j * num_band0 + g_pos[i][0]] +=
                        g1[g_pos_3] * fc3_normal_squared[g_pos[i][3]] *
                        triplet_weight;
                }
            }
        }
    }

    free(n1);
    n1 = NULL;
    free(n2);
    n2 = NULL;
}

/* Collects the (j, k, l) band indices where g is non-zero into g_pos,     */
/* together with the flat index jkl; returns the number of entries.        */
long ise_set_g_pos(long (*g_pos)[4], const long num_band0,
                   const long num_band, const char *g_zero) {
    long num_g_pos, j, k, l, jkl;

    num_g_pos = 0;
    jkl = 0;
    for (j = 0; j < num_band0; j++) {
        for (k = 0; k < num_band; k++) {
            for (l = 0; l < num_band; l++) {
                if (!g_zero[jkl]) {
                    g_pos[num_g_pos][0] = j;
                    g_pos[num_g_pos][1] = k;
                    g_pos[num_g_pos][2] = l;
                    g_pos[num_g_pos][3] = jkl;
                    num_g_pos++;
                }
                jkl++;
            }
        }
    }
    return num_g_pos;
}

/* Frequency-point variant of ise_set_g_pos: g_zero carries only a         */
/* (num_band, num_band) block, indexed by kl, shared by every band0 j.     */
static long ise_set_g_pos_frequency_point(long (*g_pos)[4],
                                          const long num_band0,
                                          const long num_band,
                                          const char *g_zero) {
    long num_g_pos, j, k, l, kl, jkl;

    num_g_pos = 0;
    jkl = 0;
    for (j = 0; j < num_band0; j++) {
        kl = 0;
        for (k = 0; k < num_band; k++) {
            for (l = 0; l < num_band; l++) {
                if (!g_zero[kl]) {
                    g_pos[num_g_pos][0] = j;
g_pos[num_g_pos][1] = k;
                    g_pos[num_g_pos][2] = l;
                    g_pos[num_g_pos][3] = jkl;
                    num_g_pos++;
                }
                jkl++;
                kl++;
            }
        }
    }
    return num_g_pos;
}

/* Per-triplet detailed contribution: fills the (band0, band, band)        */
/* resolved array and the per-band0 sums, one pass per temperature.        */
static void detailed_imag_self_energy_at_triplet(
    double *detailed_imag_self_energy, double *imag_self_energy,
    const long num_band0, const long num_band,
    const double *fc3_normal_squared, const double *frequencies,
    const long triplet[3], const double *g1, const double *g2_3,
    const char *g_zero, const double *temperatures, const long num_temps,
    const double cutoff_frequency) {
    long i, j, adrs_shift;
    double *n1, *n2;

    n1 = NULL;
    n2 = NULL;

    n1 = (double *)malloc(sizeof(double) * num_band);
    n2 = (double *)malloc(sizeof(double) * num_band);

    for (i = 0; i < num_temps; i++) {
        set_occupations(n1, n2, num_band, temperatures[i], triplet,
                        frequencies, cutoff_frequency);

        for (j = 0; j < num_band0; j++) {
            adrs_shift = j * num_band * num_band;
            if (temperatures[i] > 0) {
                imag_self_energy[i * num_band0 + j] =
                    collect_detailed_imag_self_energy(
                        detailed_imag_self_energy + adrs_shift, num_band,
                        fc3_normal_squared + adrs_shift, n1, n2,
                        g1 + adrs_shift, g2_3 + adrs_shift,
                        g_zero + adrs_shift);
            } else {
                /* T = 0: only the spontaneous-emission term remains. */
                imag_self_energy[i * num_band0 + j] =
                    collect_detailed_imag_self_energy_0K(
                        detailed_imag_self_energy + adrs_shift, num_band,
                        fc3_normal_squared + adrs_shift, n1, n2,
                        g1 + adrs_shift, g_zero + adrs_shift);
            }
        }
    }

    free(n1);
    n1 = NULL;
    free(n2);
    n2 = NULL;
}

/* Fills one (num_band, num_band) block of the detailed array and returns  */
/* its sum. Entries where g is zero or either occupation is flagged        */
/* negative (band below the cutoff frequency) stay zero.                   */
static double collect_detailed_imag_self_energy(
    double *imag_self_energy, const long num_band,
    const double *fc3_normal_squared, const double *n1, const double *n2,
    const double *g1, const double *g2_3, const char *g_zero) {
    long ij, i, j;
    double sum_g;

    sum_g = 0;
    for (ij = 0; ij < num_band * num_band; ij++) {
        imag_self_energy[ij] = 0;
        if (g_zero[ij]) {
            continue;
        }
        i = ij / num_band;
        j = ij % num_band;
        if (n1[i] < 0 || n2[j] < 0) {
            continue;
        }
        imag_self_energy[ij] =
            (((n1[i] + n2[j] + 1) * g1[ij] + (n1[i] - n2[j]) * g2_3[ij]) *
             fc3_normal_squared[ij]);
        sum_g += imag_self_energy[ij];
    }

    return sum_g;
}

/* Zero-temperature variant: only the g1 (spontaneous-emission) term.      */
static double collect_detailed_imag_self_energy_0K(
    double *imag_self_energy, const long num_band,
    const double *fc3_normal_squared, const double *n1, const double *n2,
    const double *g1, const char *g_zero) {
    long ij, i, j;
    double sum_g;

    sum_g = 0;
    for (ij = 0; ij < num_band * num_band; ij++) {
        imag_self_energy[ij] = 0;
        if (g_zero[ij]) {
            continue;
        }
        i = ij / num_band;
        j = ij % num_band;
        if (n1[i] < 0 || n2[j] < 0) {
            continue;
        }
        imag_self_energy[ij] = g1[ij] * fc3_normal_squared[ij];
        sum_g += imag_self_energy[ij];
    }

    return sum_g;
}

/* Bose-Einstein occupations for the second (n1) and third (n2) phonon of  */
/* the triplet at the given temperature; -1 marks bands whose frequency    */
/* does not exceed cutoff_frequency.                                       */
static void set_occupations(double *n1, double *n2, const long num_band,
                            const double temperature, const long triplet[3],
                            const double *frequencies,
                            const double cutoff_frequency) {
    long j;
    double f1, f2;

    for (j = 0; j < num_band; j++) {
        f1 = frequencies[triplet[1] * num_band + j];
        f2 = frequencies[triplet[2] * num_band + j];
        if (f1 > cutoff_frequency) {
            n1[j] = phonoc_bose_einstein(f1, temperature);
        } else {
            n1[j] = -1;
        }
        if (f2 > cutoff_frequency) {
            n2[j] = phonoc_bose_einstein(f2, temperature);
        } else {
            n2[j] = -1;
        }
    }
}
NAS_MG.c
//--------------------------------------------------------------------- // program MG //--------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/time.h> #if !defined(CLASS_W) && !defined(CLASS_S) && !defined(CLASS_A) && !defined(CLASS_B) && !defined(CLASS_C) && !defined(CLASS_D) && !defined(CLASS_E) # define CLASS_W #endif //---------- // Class S: //---------- #ifdef CLASS_S # define NX_DEFAULT 32 # define NY_DEFAULT 32 # define NZ_DEFAULT 32 # define NIT_DEFAULT 4 # define LM 5 # define LT_DEFAULT 5 # define DEBUG_DEFAULT 0 # define NDIM1 5 # define NDIM2 5 # define NDIM3 5 # define ONE 1 #endif //---------- // Class W: //---------- #ifdef CLASS_W # define NX_DEFAULT 128 # define NY_DEFAULT 128 # define NZ_DEFAULT 128 # define NIT_DEFAULT 4 # define LM 7 # define LT_DEFAULT 7 # define DEBUG_DEFAULT 0 # define NDIM1 7 # define NDIM2 7 # define NDIM3 7 # define ONE 1 #endif //---------- // Class A: //---------- #ifdef CLASS_A # define NX_DEFAULT 256 # define NY_DEFAULT 256 # define NZ_DEFAULT 256 # define NIT_DEFAULT 4 # define LM 8 # define LT_DEFAULT 8 # define DEBUG_DEFAULT 0 # define NDIM1 8 # define NDIM2 8 # define NDIM3 8 # define ONE 1 #endif //---------- // Class B: //---------- #ifdef CLASS_B # define NX_DEFAULT 256 # define NY_DEFAULT 256 # define NZ_DEFAULT 256 # define NIT_DEFAULT 20 # define LM 8 # define LT_DEFAULT 8 # define DEBUG_DEFAULT 0 # define NDIM1 8 # define NDIM2 8 # define NDIM3 8 # define ONE 1 #endif //---------- // Class C: //---------- #ifdef CLASS_C # define NX_DEFAULT 512 # define NY_DEFAULT 512 # define NZ_DEFAULT 512 # define NIT_DEFAULT 20 # define LM 9 # define LT_DEFAULT 9 # define DEBUG_DEFAULT 0 # define NDIM1 9 # define NDIM2 9 # define NDIM3 9 # define ONE 1 #endif //---------- // Class D: //---------- #ifdef CLASS_D # define NX_DEFAULT 1024 # define NY_DEFAULT 1024 # define NZ_DEFAULT 1024 # define NIT_DEFAULT 50 # define LM 
10 # define LT_DEFAULT 10 # define DEBUG_DEFAULT 0 # define NDIM1 10 # define NDIM2 10 # define NDIM3 10 # define ONE 1 #endif //---------- // Class E: //---------- #ifdef CLASS_E # define NX_DEFAULT 2048 # define NY_DEFAULT 2048 # define NZ_DEFAULT 2048 # define NIT_DEFAULT 50 # define LM 11 # define LT_DEFAULT 11 # define DEBUG_DEFAULT 0 # define NDIM1 11 # define NDIM2 11 # define NDIM3 11 # define ONE 1 #endif typedef struct { double real; double imag; } dcomplex; #define min(x,y) ((x) < (y) ? (x) : (y)) #define max(x,y) ((x) > (y) ? (x) : (y)) // actual dimension including ghost cells for communications #define NM (2+(1<<LM)) // size of rhs array #define NV (ONE*(2+(1<<NDIM1))*(2+(1<<NDIM2))*(2+(1<<NDIM3))) // size of residual array #define NR (((NV+NM*NM+5*NM+7*LM+6)/7)*8) // maximum number of levels #define MAXLEVEL (LT_DEFAULT+1) //--------------------------------------------------------------------- /* common /mg3/ */ int nx[MAXLEVEL + 1]; int ny[MAXLEVEL + 1]; int nz[MAXLEVEL + 1]; /* common /ClassType/ */ char Class; /* common /my_debug/ */ int debug_vec[8]; /* common /fap/ */ int m1[MAXLEVEL + 1]; int m2[MAXLEVEL + 1]; int m3[MAXLEVEL + 1]; int ir[MAXLEVEL + 1]; int lt, lb; //--------------------------------------------------------------------- // Set at m=1024, can handle cases up to 1024^3 case //--------------------------------------------------------------------- #define M (NM+1) /* common /timers/ */ #define T_init 0 #define T_bench 1 #define T_mg3P 2 #define T_psinv 3 #define T_resid 4 #define T_resid2 5 #define T_rprj3 6 #define T_interp 7 #define T_norm2 8 #define T_comm3 9 #define T_last 10 //-------------------------------------------------------------------------c // These arrays are in common because they are quite large // and probably shouldn't be allocated on the stack. They // are always passed as subroutine args. 
//-------------------------------------------------------------------------c
/* common /noautom/ */
// Grids: u = approximate solution, v = right-hand side, r = residual.
// Kept at file scope because they are large; always passed as arguments.
double u[NR];
double v[NR];
double r[NR];

/* common /grid/ */
// Global index window of this (single) process's sub-grid.
int is1, is2, is3, ie1, ie2, ie3;

// Forward declarations. NOTE: several take `void *` plus runtime extents and
// cast internally to VLA-typed pointers; `or` is used as an identifier, which
// is valid C but clashes with the C++ alternative token.
void setup(int *n1, int *n2, int *n3);
void mg3P(double u[], double v[], double r[], double a[4], double c[4],
          int n1, int n2, int n3);
void psinv(void * or , void *ou, int n1, int n2, int n3, double c[4], int k);
void resid(void *ou, void *ov, void * or , int n1, int n2, int n3,
           double a[4], int k);
void rprj3(void * or , int m1k, int m2k, int m3k, void *os,
           int m1j, int m2j, int m3j, int k);
void interp(void *oz, int mm1, int mm2, int mm3, void *ou,
            int n1, int n2, int n3, int k);
void norm2u3(void * or , int n1, int n2, int n3, double *rnm2, double *rnmu,
             int nx, int ny, int nz);
void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk);
void comm3(void *ou, int n1, int n2, int n3, int kk);
// NOTE(review): zran3's nx/ny parameters shadow the global nx[]/ny[] arrays.
void zran3(void *oz, int n1, int n2, int n3, int nx, int ny, int k);
void showall(void *oz, int n1, int n2, int n3);
double power(double a, int n);
void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
            int m, int ind);
void zero3(void *oz, int n1, int n2, int n3);
double randlc( double *x, double a );
void vranlc( int n, double *x, double a, double y[] );
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
                   double t, double mops, char *optype, int verified);

// Timer state and helpers (defined elsewhere in the file/suite).
double start[64], elapsed[64];
double elapsed_time( void );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
void wtime(double *t);

//---------------------------------------------------------------------
// Benchmark driver: build the problem, run one warm-up V-cycle, then time
// `nit` V-cycles and verify the final residual L2 norm against the class's
// reference value. Exit status 0 on successful verification, 1 otherwise.
//
// k is the current level. It is passed down through subroutine args
// and is NOT global. it is the current iteration.
//---------------------------------------------------------------------
int main() {
    int k, it;
    double t, tinit, mflops;
    double a[4], c[4];
    double rnm2, rnmu, old2, oldu, epsilon;
    int n1, n2, n3, nit;
    double nn, verify_value, err;
    int verified;
    int i;
    char *t_names[T_last]; // NOTE(review): unused here — timer report stripped?
    double tmax;           // NOTE(review): likewise unused in this build.

    for (i = T_init; i < T_last; i++) {
        timer_clear(i);
    }
    timer_start(T_init);

    printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - MG Benchmark\n\n");
    printf(" No input file. Using compiled defaults \n");

    // Problem size and iteration count come from the CLASS_* macros.
    lt = LT_DEFAULT;
    nit = NIT_DEFAULT;
    nx[lt] = NX_DEFAULT;
    ny[lt] = NY_DEFAULT;
    nz[lt] = NZ_DEFAULT;
    for (i = 0; i <= 7; i++) {
        debug_vec[i] = DEBUG_DEFAULT;
    }

    // Recover the benchmark class from (size, nit); 'U' means unknown.
    if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) {
        Class = 'U';
    } else if ( nx[lt] == 32 && nit == 4 ) {
        Class = 'S';
    } else if ( nx[lt] == 128 && nit == 4 ) {
        Class = 'W';
    } else if ( nx[lt] == 256 && nit == 4 ) {
        Class = 'A';
    } else if ( nx[lt] == 256 && nit == 20 ) {
        Class = 'B';
    } else if ( nx[lt] == 512 && nit == 20 ) {
        Class = 'C';
    } else if ( nx[lt] == 1024 && nit == 50 ) {
        Class = 'D';
    } else if ( nx[lt] == 2048 && nit == 50 ) {
        Class = 'E';
    } else {
        Class = 'U';
    }

    //---------------------------------------------------------------------
    // Use these for debug info:
    //---------------------------------------------------------------------
    // debug_vec(0) = 1 !=> report all norms
    // debug_vec(1) = 1 !=> some setup information
    // debug_vec(1) = 2 !=> more setup information
    // debug_vec(2) = k => at level k or below, show result of resid
    // debug_vec(3) = k => at level k or below, show result of psinv
    // debug_vec(4) = k => at level k or below, show result of rprj
    // debug_vec(5) = k => at level k or below, show result of interp
    // debug_vec(6) = 1 => (unused)
    // debug_vec(7) = 1 => (unused)
    //---------------------------------------------------------------------
    // Stencil coefficients for the residual operator A (a[1] assumed 0).
    a[0] = -8.0 / 3.0;
    a[1] = 0.0;
    a[2] = 1.0 / 6.0;
    a[3] = 1.0 / 12.0;

    if (Class == 'A' || Class == 'S' || Class == 'W') {
        //---------------------------------------------------------------------
        // Coefficients for the S(a) smoother
        //---------------------------------------------------------------------
        c[0] = -3.0 / 8.0;
        c[1] = +1.0 / 32.0;
        c[2] = -1.0 / 64.0;
        c[3] = 0.0;
    } else {
        //---------------------------------------------------------------------
        // Coefficients for the S(b) smoother
        //---------------------------------------------------------------------
        c[0] = -3.0 / 17.0;
        c[1] = +1.0 / 33.0;
        c[2] = -1.0 / 61.0;
        c[3] = 0.0;
    }

    lb = 1; // coarsest level of the V-cycle
    k = lt; // finest level

    setup(&n1, &n2, &n3);
    zero3(u, n1, n2, n3);
    zran3(v, n1, n2, n3, nx[lt], ny[lt], k); // random +/-1 charge RHS

    norm2u3(v, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

    printf(" Size: %4dx%4dx%4d (class %c)\n", nx[lt], ny[lt], nz[lt], Class);
    printf(" Iterations: %3d\n", nit);
    printf("\n");

    resid(u, v, r, n1, n2, n3, a, k);
    norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
    old2 = rnm2;
    oldu = rnmu;

    //---------------------------------------------------------------------
    // One iteration for startup (warms caches; results then discarded by
    // re-running setup/zero3/zran3 below).
    //---------------------------------------------------------------------
    mg3P(u, v, r, a, c, n1, n2, n3);
    resid(u, v, r, n1, n2, n3, a, k);
    setup(&n1, &n2, &n3);
    zero3(u, n1, n2, n3);
    zran3(v, n1, n2, n3, nx[lt], ny[lt], k);

    timer_stop(T_init);
    tinit = timer_read(T_init);
    printf(" Initialization time: %15.3f seconds\n\n", tinit);

    for (i = T_bench; i < T_last; i++) {
        timer_clear(i);
    }
    timer_start(T_bench);

    resid(u, v, r, n1, n2, n3, a, k);
    norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
    old2 = rnm2;
    oldu = rnmu;

    // Timed section: nit V-cycles, each followed by a residual update.
    for (it = 1; it <= nit; it++) {
        if ((it == 1) || (it == nit) || ((it % 5) == 0)) {
            printf(" iter %3d\n", it);
        }
        mg3P(u, v, r, a, c, n1, n2, n3);
        resid(u, v, r, n1, n2, n3, a, k);
    }

    norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

    timer_stop(T_bench);
    t = timer_read(T_bench);

    verified = 0;
    verify_value = 0.0;

    printf("\n Benchmark completed\n");

    // Compare the final residual norm with the per-class reference value.
    epsilon = 1.0e-8;
    if (Class != 'U') {
        if (Class == 'S') {
            verify_value = 0.5307707005734e-04;
        } else if (Class == 'W') {
            verify_value = 0.6467329375339e-05;
        } else if (Class == 'A') {
            verify_value = 0.2433365309069e-05;
        } else if (Class == 'B') {
            verify_value = 0.1800564401355e-05;
        } else if (Class == 'C') {
            verify_value = 0.5706732285740e-06;
        } else if (Class == 'D') {
            verify_value = 0.1583275060440e-09;
        } else if (Class == 'E') {
            verify_value = 0.8157592357404e-10;
        }

        err = fabs( rnm2 - verify_value ) / verify_value;
        // err = fabs( rnm2 - verify_value );
        if (err <= epsilon) {
            verified = 1;
            printf(" VERIFICATION SUCCESSFUL\n");
            printf(" L2 Norm is %20.13E\n", rnm2);
            printf(" Error is %20.13E\n", err);
        } else {
            verified = 0;
            printf(" VERIFICATION FAILED\n");
            printf(" L2 Norm is %20.13E\n", rnm2);
            printf(" The correct L2 Norm is %20.13E\n", verify_value);
        }
    } else {
        verified = 0;
        printf(" Problem size unknown\n");
        printf(" NO VERIFICATION PERFORMED\n");
        printf(" L2 Norm is %20.13E\n", rnm2);
    }

    // 58 flops per grid point per iteration (NPB operation count).
    nn = 1.0 * nx[lt] * ny[lt] * nz[lt];
    if (t != 0.0) {
        mflops = 58.0 * nit * nn * 1.0e-6 / t;
    } else {
        mflops = 0.0;
    }

    print_results("MG", Class, nx[lt], ny[lt], nz[lt], nit, t,
                  mflops, " floating point", verified);

    int exitValue = verified ? 0 : 1;
    return exitValue;
}

// Derive per-level grid extents (nx/ny/nz, m1/m2/m3 including 2 ghost cells),
// this process's index window (is*/ie* and the returned n1/n2/n3), and the
// per-level offsets ir[] into the flat u/r arrays.
void setup(int *n1, int *n2, int *n3) {
    int k, j;
    int ax, mi[MAXLEVEL + 1][3];
    int ng[MAXLEVEL + 1][3];

    // Interior sizes: halved at each coarser level.
    ng[lt][0] = nx[lt];
    ng[lt][1] = ny[lt];
    ng[lt][2] = nz[lt];
    for (k = lt - 1; k >= 1; k--) {
        for (ax = 0; ax < 3; ax++) {
            ng[k][ax] = ng[k + 1][ax] / 2;
        }
    }

#pragma omp parallel for default(shared) private(k) firstprivate(lt, ng)
    for (k = lt; k >= 1; k--) {
        nx[k] = ng[k][0];
        ny[k] = ng[k][1];
        nz[k] = ng[k][2];
    }

    // Each iteration writes a distinct mi[k]/m*[k] slot, so the loop is safe
    // to run in parallel.
#pragma omp parallel for default(shared) private(k, ax) firstprivate(lt, ng)
    for (k = lt; k >= 1; k--) {
        for (ax = 0; ax < 3; ax++) {
            mi[k][ax] = 2 + ng[k][ax]; // +2 ghost layers
        }
        m1[k] = mi[k][0];
        m2[k] = mi[k][1];
        m3[k] = mi[k][2];
    }

    // Single-process run: the window spans the whole fine grid.
    k = lt;
    is1 = 2 + ng[k][0] - ng[lt][0];
    ie1 = 1 + ng[k][0];
    *n1 = 3 + ie1 - is1;
    is2 = 2 + ng[k][1] - ng[lt][1];
    ie2 = 1 + ng[k][1];
    *n2 = 3 + ie2 - is2;
    is3 = 2 + ng[k][2] - ng[lt][2];
    ie3 = 1 + ng[k][2];
    *n3 = 3 + ie3 - is3;

    // Flat-array offsets: coarse levels are stacked after finer ones.
    ir[lt] = 0;
    for (j = lt - 1; j >= 1; j--) {
        ir[j] = ir[j + 1] + ONE * m1[j + 1] * m2[j + 1] * m3[j + 1];
    }

    if (debug_vec[1] >= 1) {
        printf(" in setup, \n");
        printf(" k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
        printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
               k, lt, ng[k][0], ng[k][1], ng[k][2],
               *n1, *n2, *n3, is1, is2, is3, ie1, ie2, ie3);
    }
}

//---------------------------------------------------------------------
// multigrid V-cycle routine
//---------------------------------------------------------------------
void mg3P(double u[], double v[], double r[], double a[4], double c[4],
          int n1, int n2, int n3) {
    int j, k;

    //---------------------------------------------------------------------
    // down cycle.
    // restrict the residual from the fine grid to the coarse
    //---------------------------------------------------------------------
    for (k = lt; k >= lb + 1; k--) {
        j = k - 1;
        rprj3(&r[ir[k]], m1[k], m2[k], m3[k],
              &r[ir[j]], m1[j], m2[j], m3[j], k);
    }

    k = lb;
    //---------------------------------------------------------------------
    // compute an approximate solution on the coarsest grid
    //---------------------------------------------------------------------
    zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
    psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);

    // Up cycle: prolongate, recompute residual, smooth — level by level.
    for (k = lb + 1; k <= lt - 1; k++) {
        j = k - 1;
        //---------------------------------------------------------------------
        // prolongate from level k-1 to k
        //---------------------------------------------------------------------
        zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
        interp(&u[ir[j]], m1[j], m2[j], m3[j],
               &u[ir[k]], m1[k], m2[k], m3[k], k);
        //---------------------------------------------------------------------
        // compute residual for level k
        //---------------------------------------------------------------------
        resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k);
        //---------------------------------------------------------------------
        // apply smoother
        //---------------------------------------------------------------------
        psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);
    }

    // Finest level: prolongate into u, form r = v - Au, smooth once more.
    j = lt - 1;
    k = lt;
    interp(&u[ir[j]], m1[j], m2[j], m3[j], u, n1, n2, n3, k);
    resid(u, v, r, n1, n2, n3, a, k);
    psinv(r, u, n1, n2, n3, c, k);
}

//---------------------------------------------------------------------
// psinv applies an approximate inverse as smoother: u = u + Cr
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Presuming coefficient c(3) is zero (the NPB assumes this,
// but it is thus not a general case), 2A + 1M may be eliminated,
// resulting in 13A + 3M.
// Note that this vectorizes, and is also fine for cache
//--------------------------------------------------------------------- void psinv(void * or , void *ou, int n1, int n2, int n3, double c[4], int k) { double (*r)[n2][n1] = (double (*)[n2][n1]) or; double (*u)[n2][n1] = (double (*)[n2][n1])ou; int i3, i2, i1; double r1[M], r2[M]; #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(n3, n2, n1, r, c, r1, r2) for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { for (i1 = 0; i1 < n1; i1++) { r1[i1] = r[i3][i2 - 1][i1] + r[i3][i2 + 1][i1] + r[i3 - 1][i2][i1] + r[i3 + 1][i2][i1]; r2[i1] = r[i3 - 1][i2 - 1][i1] + r[i3 - 1][i2 + 1][i1] + r[i3 + 1][i2 - 1][i1] + r[i3 + 1][i2 + 1][i1]; } for (i1 = 1; i1 < n1 - 1; i1++) { u[i3][i2][i1] = u[i3][i2][i1] + c[0] * r[i3][i2][i1] + c[1] * ( r[i3][i2][i1 - 1] + r[i3][i2][i1 + 1] + r1[i1] ) + c[2] * ( r2[i1] + r1[i1 - 1] + r1[i1 + 1] ); //-------------------------------------------------------------------- // Assume c[3] = 0 (Enable line below if c[3] not= 0) //-------------------------------------------------------------------- // + c[3] * ( r2[i1-1] + r2[i1+1] ) //-------------------------------------------------------------------- } } } //--------------------------------------------------------------------- // exchange boundary points //--------------------------------------------------------------------- comm3(u, n1, n2, n3, k); if (debug_vec[0] >= 1) { rep_nrm(u, n1, n2, n3, " psinv", k); } if (debug_vec[3] >= k) { showall(u, n1, n2, n3); } } //--------------------------------------------------------------------- // resid computes the residual: r = v - Au // // This implementation costs 15A + 4M per result, where // A and M denote the costs of Addition (or Subtraction) and // Multiplication, respectively. // Presuming coefficient a(1) is zero (the NPB assumes this, // but it is thus not a general case), 3A + 1M may be eliminated, // resulting in 12A + 3M. // Note that this vectorizes, and is also fine for cache // based machines. 
//--------------------------------------------------------------------- void resid(void *ou, void *ov, void * or , int n1, int n2, int n3, double a[4], int k) { double (*u)[n2][n1] = (double (*)[n2][n1])ou; double (*v)[n2][n1] = (double (*)[n2][n1])ov; double (*r)[n2][n1] = (double (*)[n2][n1]) or; int i3, i2, i1; double u1[M], u2[M]; #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(n3, n2, n1, u, a, v, u1, u2) for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { for (i1 = 0; i1 < n1; i1++) { u1[i1] = u[i3][i2 - 1][i1] + u[i3][i2 + 1][i1] + u[i3 - 1][i2][i1] + u[i3 + 1][i2][i1]; u2[i1] = u[i3 - 1][i2 - 1][i1] + u[i3 - 1][i2 + 1][i1] + u[i3 + 1][i2 - 1][i1] + u[i3 + 1][i2 + 1][i1]; } for (i1 = 1; i1 < n1 - 1; i1++) { r[i3][i2][i1] = v[i3][i2][i1] - a[0] * u[i3][i2][i1] //------------------------------------------------------------------- // Assume a[1] = 0 (Enable 2 lines below if a[1] not= 0) //------------------------------------------------------------------- // - a[1] * ( u[i3][i2][i1-1] + u[i3][i2][i1+1] // + u1[i1] ) //------------------------------------------------------------------- - a[2] * ( u2[i1] + u1[i1 - 1] + u1[i1 + 1] ) - a[3] * ( u2[i1 - 1] + u2[i1 + 1] ); } } } //--------------------------------------------------------------------- // exchange boundary data //--------------------------------------------------------------------- comm3(r, n1, n2, n3, k); if (debug_vec[0] >= 1) { rep_nrm(r, n1, n2, n3, " resid", k); } if (debug_vec[2] >= k) { showall(r, n1, n2, n3); } } //--------------------------------------------------------------------- // rprj3 projects onto the next coarser grid, // using a trilinear Finite Element projection: s = r' = P r // // This implementation costs 20A + 4M per result, where // A and M denote the costs of Addition and Multiplication. // Note that this vectorizes, and is also fine for cache // based machines. 
//--------------------------------------------------------------------- void rprj3(void * or , int m1k, int m2k, int m3k, void *os, int m1j, int m2j, int m3j, int k) { double (*r)[m2k][m1k] = (double (*)[m2k][m1k]) or; double (*s)[m2j][m1j] = (double (*)[m2j][m1j])os; int j3, j2, j1, i3, i2, i1, d1, d2, d3, j; double x1[M], y1[M], x2, y2; if (m1k == 3) { d1 = 2; } else { d1 = 1; } if (m2k == 3) { d2 = 2; } else { d2 = 1; } if (m3k == 3) { d3 = 2; } else { d3 = 1; } #pragma omp parallel for default(shared) private(j3, j2, j1, i3, i2, i1, y2, x2) firstprivate(m3j, d3, m2j, d2, m1j, d1, r, x1, y1) for (j3 = 1; j3 < m3j - 1; j3++) { i3 = 2 * j3 - d3; for (j2 = 1; j2 < m2j - 1; j2++) { i2 = 2 * j2 - d2; for (j1 = 1; j1 < m1j; j1++) { i1 = 2 * j1 - d1; x1[i1] = r[i3 + 1][i2 ][i1] + r[i3 + 1][i2 + 2][i1] + r[i3 ][i2 + 1][i1] + r[i3 + 2][i2 + 1][i1]; y1[i1] = r[i3 ][i2 ][i1] + r[i3 + 2][i2 ][i1] + r[i3 ][i2 + 2][i1] + r[i3 + 2][i2 + 2][i1]; } for (j1 = 1; j1 < m1j - 1; j1++) { i1 = 2 * j1 - d1; y2 = r[i3 ][i2 ][i1 + 1] + r[i3 + 2][i2 ][i1 + 1] + r[i3 ][i2 + 2][i1 + 1] + r[i3 + 2][i2 + 2][i1 + 1]; x2 = r[i3 + 1][i2 ][i1 + 1] + r[i3 + 1][i2 + 2][i1 + 1] + r[i3 ][i2 + 1][i1 + 1] + r[i3 + 2][i2 + 1][i1 + 1]; s[j3][j2][j1] = 0.5 * r[i3 + 1][i2 + 1][i1 + 1] + 0.25 * (r[i3 + 1][i2 + 1][i1] + r[i3 + 1][i2 + 1][i1 + 2] + x2) + 0.125 * (x1[i1] + x1[i1 + 2] + y2) + 0.0625 * (y1[i1] + y1[i1 + 2]); } } } j = k - 1; comm3(s, m1j, m2j, m3j, j); if (debug_vec[0] >= 1) { rep_nrm(s, m1j, m2j, m3j, " rprj3", k - 1); } if (debug_vec[4] >= k) { showall(s, m1j, m2j, m3j); } } //--------------------------------------------------------------------- // interp adds the trilinear interpolation of the correction // from the coarser grid to the current approximation: u = u + Qu' // // Observe that this implementation costs 16A + 4M, where // A and M denote the costs of Addition and Multiplication. // Note that this vectorizes, and is also fine for cache // based machines. 
Vector machines may get slightly better // performance however, with 8 separate "do i1" loops, rather than 4. //--------------------------------------------------------------------- void interp(void *oz, int mm1, int mm2, int mm3, void *ou, int n1, int n2, int n3, int k) { double (*z)[mm2][mm1] = (double (*)[mm2][mm1])oz; double (*u)[n2][n1] = (double (*)[n2][n1])ou; int i3, i2, i1, d1, d2, d3, t1, t2, t3; // note that m = 1037 in globals.h but for this only need to be // 535 to handle up to 1024^3 // integer m // parameter( m=535 ) double z1[M], z2[M], z3[M]; if (n1 != 3 && n2 != 3 && n3 != 3) { #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(mm3, mm2, mm1, z, z1, z2, z3) for (i3 = 0; i3 < mm3 - 1; i3++) { for (i2 = 0; i2 < mm2 - 1; i2++) { for (i1 = 0; i1 < mm1; i1++) { z1[i1] = z[i3][i2 + 1][i1] + z[i3][i2][i1]; z2[i1] = z[i3 + 1][i2][i1] + z[i3][i2][i1]; z3[i1] = z[i3 + 1][i2 + 1][i1] + z[i3 + 1][i2][i1] + z1[i1]; } for (i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3][2 * i2][2 * i1] = u[2 * i3][2 * i2][2 * i1] + z[i3][i2][i1]; u[2 * i3][2 * i2][2 * i1 + 1] = u[2 * i3][2 * i2][2 * i1 + 1] + 0.5 * (z[i3][i2][i1 + 1] + z[i3][i2][i1]); } for (i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3][2 * i2 + 1][2 * i1] = u[2 * i3][2 * i2 + 1][2 * i1] + 0.5 * z1[i1]; u[2 * i3][2 * i2 + 1][2 * i1 + 1] = u[2 * i3][2 * i2 + 1][2 * i1 + 1] + 0.25 * (z1[i1] + z1[i1 + 1]); } for (i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3 + 1][2 * i2][2 * i1] = u[2 * i3 + 1][2 * i2][2 * i1] + 0.5 * z2[i1]; u[2 * i3 + 1][2 * i2][2 * i1 + 1] = u[2 * i3 + 1][2 * i2][2 * i1 + 1] + 0.25 * (z2[i1] + z2[i1 + 1]); } for (i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3 + 1][2 * i2 + 1][2 * i1] = u[2 * i3 + 1][2 * i2 + 1][2 * i1] + 0.25 * z3[i1]; u[2 * i3 + 1][2 * i2 + 1][2 * i1 + 1] = u[2 * i3 + 1][2 * i2 + 1][2 * i1 + 1] + 0.125 * (z3[i1] + z3[i1 + 1]); } } } } else { if (n1 == 3) { d1 = 2; t1 = 1; } else { d1 = 1; t1 = 0; } if (n2 == 3) { d2 = 2; t2 = 1; } else { d2 = 1; t2 = 0; } if (n3 == 3) { d3 = 2; t3 
= 1; } else { d3 = 1; t3 = 0; } #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(d3, mm3, d2, mm2, d1, mm1, t1, t2, z) for (i3 = d3; i3 <= mm3 - 1; i3++) { for (i2 = d2; i2 <= mm2 - 1; i2++) { for (i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]; } for (i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] + 0.5 * (z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2 - 1][i1 - 1]); } } for (i2 = 1; i2 <= mm2 - 1; i2++) { for (i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] + 0.5 * (z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } for (i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] + 0.25 * (z[i3 - 1][i2][i1] + z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } } } #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(mm3, d2, mm2, d1, mm1, t3, t1, t2, z) for (i3 = 1; i3 <= mm3 - 1; i3++) { for (i2 = d2; i2 <= mm2 - 1; i2++) { for (i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] + 0.5 * (z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } for (i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] + 0.25 * (z[i3 ][i2 - 1][i1] + z[i3 ][i2 - 1][i1 - 1] + z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2 - 1][i1 - 1]); } } for (i2 = 1; i2 <= mm2 - 1; i2++) { for (i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] + 0.25 * (z[i3 ][i2][i1 - 1] + z[i3 ][i2 - 1][i1 - 1] + z[i3 - 
1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } for (i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] + 0.125 * (z[i3 ][i2][i1 ] + z[i3 ][i2 - 1][i1 ] + z[i3 ][i2][i1 - 1] + z[i3 ][i2 - 1][i1 - 1] + z[i3 - 1][i2][i1 ] + z[i3 - 1][i2 - 1][i1 ] + z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } } } } if (debug_vec[0] >= 1) { rep_nrm(z, mm1, mm2, mm3, "z: inter", k - 1); rep_nrm(u, n1, n2, n3, "u: inter", k); } if (debug_vec[5] >= k) { showall(z, mm1, mm2, mm3); showall(u, n1, n2, n3); } } //--------------------------------------------------------------------- // norm2u3 evaluates approximations to the L2 norm and the // uniform (or L-infinity or Chebyshev) norm, under the // assumption that the boundaries are periodic or zero. Add the // boundaries in with half weight (quarter weight on the edges // and eighth weight at the corners) for inhomogeneous boundaries. //--------------------------------------------------------------------- void norm2u3(void * or , int n1, int n2, int n3, double *rnm2, double *rnmu, int nx, int ny, int nz) { double (*r)[n2][n1] = (double (*)[n2][n1]) or; double s, a; int i3, i2, i1; double dn; dn = 1.0 * nx * ny * nz; s = 0.0; *rnmu = 0.0; for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { for (i1 = 1; i1 < n1 - 1; i1++) { s = s + pow(r[i3][i2][i1], 2.0); a = fabs(r[i3][i2][i1]); if (a > *rnmu) *rnmu = a; } } } *rnm2 = sqrt(s / dn); } //--------------------------------------------------------------------- // report on norm //--------------------------------------------------------------------- void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk) { double rnm2, rnmu; norm2u3(u, n1, n2, n3, &rnm2, &rnmu, nx[kk], ny[kk], nz[kk]); printf(" Level%2d in %8s: norms =%21.14E%21.14E\n", kk, title, rnm2, rnmu); } //--------------------------------------------------------------------- // comm3 organizes the communication on all 
borders //--------------------------------------------------------------------- void comm3(void *ou, int n1, int n2, int n3, int kk) { double (*u)[n2][n1] = (double (*)[n2][n1])ou; int i1, i2, i3; #pragma omp parallel for default(shared) private(i3, i2) firstprivate(n3, n2, n1) for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { u[i3][i2][ 0] = u[i3][i2][n1 - 2]; u[i3][i2][n1 - 1] = u[i3][i2][ 1]; } } #pragma omp parallel for default(shared) private(i3, i1) firstprivate(n3, n1, n2) for (i3 = 1; i3 < n3 - 1; i3++) { for (i1 = 0; i1 < n1; i1++) { u[i3][ 0][i1] = u[i3][n2 - 2][i1]; u[i3][n2 - 1][i1] = u[i3][ 1][i1]; } } #pragma omp parallel for default(shared) private(i2, i1) firstprivate(n2, n1, n3) for (i2 = 0; i2 < n2; i2++) { for (i1 = 0; i1 < n1; i1++) { u[ 0][i2][i1] = u[n3 - 2][i2][i1]; u[n3 - 1][i2][i1] = u[ 1][i2][i1]; } } } //--------------------------------------------------------------------- // zran3 loads +1 at ten randomly chosen points, // loads -1 at a different ten random points, // and zero elsewhere. 
//---------------------------------------------------------------------
// zran3 fills z (an n3 x n2 x n1 grid) with pseudorandom values, finds
// the ten largest and ten smallest values, then rewrites z as all
// zeros except +1 at the ten largest locations and -1 at the ten
// smallest, and finally applies the periodic boundary (comm3).
// is1..ie3, nx, ny describe this processor's subgrid extents; is1, is2,
// is3, ie1, ie2, ie3 are globals defined elsewhere in this file — not
// visible in this chunk, TODO confirm.
//---------------------------------------------------------------------
void zran3(void *oz, int n1, int n2, int n3, int nx, int ny, int k)
{
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;

  int i0, m0, m1;
  int i1, i2, i3, d1, e1, e2, e3;
  double xx, x0, x1, a1, a2, ai;

  const int mm = 10;                      // number of extreme points tracked
  const double a = pow(5.0, 13.0);        // LCG multiplier (see randlc)
  const double x = 314159265.0;           // LCG seed
  double ten[mm][2], best;
  int i, j1[mm][2], j2[mm][2], j3[mm][2];
  int jg[4][mm][2];

  double rdummy;                          // sink for randlc's return value

  a1 = power(a, nx);
  a2 = power(a, nx * ny);

  zero3(z, n1, n2, n3);

  // starting stream position for this subgrid
  i = is1 - 2 + nx * (is2 - 2 + ny * (is3 - 2));

  ai = power(a, i);
  d1 = ie1 - is1 + 1;
  e1 = ie1 - is1 + 2;
  e2 = ie2 - is2 + 2;
  e3 = ie3 - is3 + 2;
  x0 = x;
  rdummy = randlc(&x0, ai);
  // fill the interior of each (i3,i2) row with d1 random numbers
  for (i3 = 1; i3 < e3; i3++) {
    x1 = x0;
    for (i2 = 1; i2 < e2; i2++) {
      xx = x1;
      vranlc(d1, &xx, a, &(z[i3][i2][1]));
      rdummy = randlc(&x1, a1);
    }
    rdummy = randlc(&x0, a2);
  }

  //---------------------------------------------------------------------
  // comm3(z,n1,n2,n3);
  // showall(z,n1,n2,n3);
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // each processor looks for twenty candidates
  //---------------------------------------------------------------------
  // ten[.][1] tracks the largest values (start below any positive value);
  // ten[.][0] tracks the smallest (start above any value in (0,1)).
  #pragma omp parallel for default(shared) private(i) firstprivate(mm)
  for (i = 0; i < mm; i++) {
    ten[i][1] = 0.0;
    j1[i][1] = 0;
    j2[i][1] = 0;
    j3[i][1] = 0;
    ten[i][0] = 1.0;
    j1[i][0] = 0;
    j2[i][0] = 0;
    j3[i][0] = 0;
  }

  for (i3 = 1; i3 < n3 - 1; i3++) {
    for (i2 = 1; i2 < n2 - 1; i2++) {
      for (i1 = 1; i1 < n1 - 1; i1++) {
        if (z[i3][i2][i1] > ten[0][1]) {
          // new candidate for the "largest" list; keep it sorted
          ten[0][1] = z[i3][i2][i1];
          j1[0][1] = i1;
          j2[0][1] = i2;
          j3[0][1] = i3;
          bubble(ten, j1, j2, j3, mm, 1);
        }
        if (z[i3][i2][i1] < ten[0][0]) {
          // new candidate for the "smallest" list; keep it sorted
          ten[0][0] = z[i3][i2][i1];
          j1[0][0] = i1;
          j2[0][0] = i2;
          j3[0][0] = i3;
          bubble(ten, j1, j2, j3, mm, 0);
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // Now which of these are globally best?
  //---------------------------------------------------------------------
  i1 = mm - 1;
  i0 = mm - 1;
  for (i = mm - 1; i >= 0; i--) {
    best = 0.0;
    if (best < ten[i1][1]) {
      // convert local indices to global indices (jg[0] = processor id)
      jg[0][i][1] = 0;
      jg[1][i][1] = is1 - 2 + j1[i1][1];
      jg[2][i][1] = is2 - 2 + j2[i1][1];
      jg[3][i][1] = is3 - 2 + j3[i1][1];
      i1 = i1 - 1;
    } else {
      jg[0][i][1] = 0;
      jg[1][i][1] = 0;
      jg[2][i][1] = 0;
      jg[3][i][1] = 0;
    }
    best = 1.0;
    if (best > ten[i0][0]) {
      jg[0][i][0] = 0;
      jg[1][i][0] = is1 - 2 + j1[i0][0];
      jg[2][i][0] = is2 - 2 + j2[i0][0];
      jg[3][i][0] = is3 - 2 + j3[i0][0];
      i0 = i0 - 1;
    } else {
      jg[0][i][0] = 0;
      jg[1][i][0] = 0;
      jg[2][i][0] = 0;
      jg[3][i][0] = 0;
    }
  }
  // m1 = i1+1;
  // m0 = i0+1;
  // serial version: all mm candidates are globally best
  m1 = 0;
  m0 = 0;

  /*
  int cnt = 0;
  printf("  \n");
  printf("  negative charges at\n");
  for (i = 0; i < mm; i++) {
    printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]);
    if (++cnt % 5 == 0) printf("\n");
  }

  cnt = 0;
  printf("  positive charges at\n");
  for (i = 0; i < mm; i++) {
    printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]);
    if (++cnt % 5 == 0) printf("\n");
  }

  cnt = 0;
  printf("  small random numbers were\n");
  for (i = mm-1; i >= 0; i--) {
    printf(" %15.8E", ten[i][0]);
    if (++cnt % 5 == 0) printf("\n");
  }

  cnt = 0;
  printf("  and they were found on processor number\n");
  for (i = mm-1; i >= 0; i--) {
    printf(" %4d", jg[0][i][0]);
    if (++cnt % 10 == 0) printf("\n");
  }

  cnt = 0;
  printf("  large random numbers were\n");
  for (i = mm-1; i >= 0; i--) {
    printf(" %15.8E", ten[i][1]);
    if (++cnt % 5 == 0) printf("\n");
  }

  cnt = 0;
  printf("  and they were found on processor number\n");
  for (i = mm-1; i >= 0; i--) {
    printf(" %4d", jg[0][i][1]);
    if (++cnt % 10 == 0) printf("\n");
  }
  */

  // reset z to zero, then deposit the charges
  #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(n3, n2, n1)
  for (i3 = 0; i3 < n3; i3++) {
    for (i2 = 0; i2 < n2; i2++) {
      for (i1 = 0; i1 < n1; i1++) {
        z[i3][i2][i1] = 0.0;
      }
    }
  }
  for (i = mm - 1; i >= m0; i--) {
    z[jg[3][i][0]][jg[2][i][0]][jg[1][i][0]] = -1.0;
  }
  for (i = mm - 1; i >= m1; i--) {
    z[jg[3][i][1]][jg[2][i][1]][jg[1][i][1]] = +1.0;
  }
  comm3(z, n1, n2, n3, k);

  //---------------------------------------------------------------------
  // showall(z,n1,n2,n3);
  //---------------------------------------------------------------------
}


// showall prints a small corner (up to 18 x 14 x 18) of the grid z,
// for debugging.  min is presumably a macro defined elsewhere in this
// file — TODO confirm.
void showall(void *oz, int n1, int n2, int n3)
{
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;

  int i1, i2, i3;
  int m1, m2, m3;

  m1 = min(n1, 18);
  m2 = min(n2, 14);
  m3 = min(n3, 18);

  printf("   \n");
  for (i3 = 0; i3 < m3; i3++) {
    for (i1 = 0; i1 < m1; i1++) {
      for (i2 = 0; i2 < m2; i2++) {
        printf("%6.3f", z[i3][i2][i1]);
      }
      printf("\n");
    }
    printf("  - - - - - - - \n");
  }
  printf("   \n");
}


//---------------------------------------------------------------------
// power  raises an integer, disguised as a double
// precision real, to an integer power
//---------------------------------------------------------------------
// Computes a^n (mod 2^46) by binary exponentiation, using randlc as
// the modular multiply.  The local variable deliberately shadows the
// function name (legal in C).
double power(double a, int n)
{
  double aj;
  int nj;
  double rdummy;                  // sink for randlc's return value
  double power;

  power = 1.0;
  nj = n;
  aj = a;

  while (nj != 0) {
    if ((nj % 2) == 1) rdummy = randlc(&power, aj);   // multiply in this bit
    rdummy = randlc(&aj, aj);                          // square
    nj = nj / 2;
  }

  return power;
}


//---------------------------------------------------------------------
// bubble        does a bubble sort in direction dir
//---------------------------------------------------------------------
// After a new candidate is written into slot 0, one bubble pass moves
// it to its sorted position; ind==1 keeps ten[.][1] ascending (largest
// values), ind==0 keeps ten[.][0] descending (smallest values).  The
// j1/j2/j3 index arrays are permuted in lockstep.
void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
            int m, int ind)
{
  double temp;
  int i, j_temp;

  if (ind == 1) {
    for (i = 0; i < m - 1; i++) {
      if (ten[i][ind] > ten[i + 1][ind]) {
        temp = ten[i + 1][ind];
        ten[i + 1][ind] = ten[i][ind];
        ten[i][ind] = temp;

        j_temp = j1[i + 1][ind];
        j1[i + 1][ind] = j1[i][ind];
        j1[i][ind] = j_temp;

        j_temp = j2[i + 1][ind];
        j2[i + 1][ind] = j2[i][ind];
        j2[i][ind] = j_temp;

        j_temp = j3[i + 1][ind];
        j3[i + 1][ind] = j3[i][ind];
        j3[i][ind] = j_temp;
      } else {
        return;               // already in place; rest of list is sorted
      }
    }
  } else {
    for (i = 0; i < m - 1; i++) {
      if (ten[i][ind] < ten[i + 1][ind]) {
        temp = ten[i + 1][ind];
        ten[i + 1][ind] = ten[i][ind];
        ten[i][ind] = temp;

        j_temp = j1[i + 1][ind];
        j1[i + 1][ind] = j1[i][ind];
        j1[i][ind] = j_temp;

        j_temp = j2[i + 1][ind];
        j2[i + 1][ind] = j2[i][ind];
        j2[i][ind] = j_temp;

        j_temp = j3[i + 1][ind];
        j3[i + 1][ind] = j3[i][ind];
        j3[i][ind] = j_temp;
      } else {
        return;               // already in place; rest of list is sorted
      }
    }
  }
}


// zero3 sets the whole n3 x n2 x n1 grid z to zero, in parallel.
void zero3(void *oz, int n1, int n2, int n3)
{
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;

  int i1, i2, i3;

  #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(n3, n2, n1)
  for (i3 = 0; i3 < n3; i3++) {
    for (i2 = 0; i2 < n2; i2++) {
      for (i1 = 0; i1 < n1; i1++) {
        z[i3][i2][i1] = 0.0;
      }
    }
  }
}


double randlc( double *x, double a )
{
  //--------------------------------------------------------------------
  //
  //  This routine returns a uniform pseudorandom double precision number in the
  //  range (0, 1) by using the linear congruential generator
  //
  //  x_{k+1} = a x_k  (mod 2^46)
  //
  //  where 0 < x_k < 2^46 and 0 < a < 2^46.  This scheme generates 2^44 numbers
  //  before repeating.  The argument A is the same as 'a' in the above formula,
  //  and X is the same as x_0.  A and X must be odd double precision integers
  //  in the range (1, 2^46).  The returned value RANDLC is normalized to be
  //  between 0 and 1, i.e. RANDLC = 2^(-46) * x_1.  X is updated to contain
  //  the new seed x_1, so that subsequent calls to RANDLC using the same
  //  arguments will generate a continuous sequence.
  //
  //  This routine should produce the same results on any computer with at least
  //  48 mantissa bits in double precision floating point data.  On 64 bit
  //  systems, double precision should be disabled.
  //
  //  David H. Bailey     October 26, 1990
  //
  //--------------------------------------------------------------------

  // r23 = pow(0.5, 23.0);
  ////  pow(0.5, 23.0) = 1.1920928955078125e-07
  // r46 = r23 * r23;
  // t23 = pow(2.0, 23.0);
  ////  pow(2.0, 23.0) = 8.388608e+06
  // t46 = t23 * t23;

  const double r23 = 1.1920928955078125e-07;
  const double r46 = r23 * r23;
  const double t23 = 8.388608e+06;
  const double t46 = t23 * t23;

  double t1, t2, t3, t4, a1, a2, x1, x2, z;

  double r;

  //--------------------------------------------------------------------
  //  Break A into two parts such that A = 2^23 * A1 + A2.
  //--------------------------------------------------------------------
  t1 = r23 * a;
  a1 = (int) t1;
  a2 = a - t23 * a1;

  //--------------------------------------------------------------------
  //  Break X into two parts such that X = 2^23 * X1 + X2, compute
  //  Z = A1 * X2 + A2 * X1  (mod 2^23), and then
  //  X = 2^23 * Z + A2 * X2  (mod 2^46).
  //--------------------------------------------------------------------
  t1 = r23 * (*x);
  x1 = (int) t1;
  x2 = *x - t23 * x1;
  t1 = a1 * x2 + a2 * x1;
  t2 = (int) (r23 * t1);
  z = t1 - t23 * t2;
  t3 = t23 * z + a2 * x2;
  t4 = (int) (r46 * t3);
  *x = t3 - t46 * t4;
  r = r46 * (*x);

  return r;
}


void vranlc( int n, double *x, double a, double y[] )
{
  //--------------------------------------------------------------------
  //
  //  This routine generates N uniform pseudorandom double precision numbers in
  //  the range (0, 1) by using the linear congruential generator
  //
  //  x_{k+1} = a x_k  (mod 2^46)
  //
  //  where 0 < x_k < 2^46 and 0 < a < 2^46.  This scheme generates 2^44 numbers
  //  before repeating.  The argument A is the same as 'a' in the above formula,
  //  and X is the same as x_0.  A and X must be odd double precision integers
  //  in the range (1, 2^46).  The N results are placed in Y and are normalized
  //  to be between 0 and 1.  X is updated to contain the new seed, so that
  //  subsequent calls to VRANLC using the same arguments will generate a
  //  continuous sequence.  If N is zero, only initialization is performed, and
  //  the variables X, A and Y are ignored.
  //
  //  This routine is the standard version designed for scalar or RISC systems.
  //  However, it should produce the same results on any single processor
  //  computer with at least 48 mantissa bits in double precision floating point
  //  data.  On 64 bit systems, double precision should be disabled.
  //
  //--------------------------------------------------------------------

  // r23 = pow(0.5, 23.0);
  ////  pow(0.5, 23.0) = 1.1920928955078125e-07
  // r46 = r23 * r23;
  // t23 = pow(2.0, 23.0);
  ////  pow(2.0, 23.0) = 8.388608e+06
  // t46 = t23 * t23;

  const double r23 = 1.1920928955078125e-07;
  const double r46 = r23 * r23;
  const double t23 = 8.388608e+06;
  const double t46 = t23 * t23;

  double t1, t2, t3, t4, a1, a2, x1, x2, z;

  int i;

  //--------------------------------------------------------------------
  //  Break A into two parts such that A = 2^23 * A1 + A2.
  //--------------------------------------------------------------------
  t1 = r23 * a;
  a1 = (int) t1;
  a2 = a - t23 * a1;

  //--------------------------------------------------------------------
  //  Generate N results.   This loop is not vectorizable.
  //--------------------------------------------------------------------
  for ( i = 0; i < n; i++ ) {
    //--------------------------------------------------------------------
    //  Break X into two parts such that X = 2^23 * X1 + X2, compute
    //  Z = A1 * X2 + A2 * X1  (mod 2^23), and then
    //  X = 2^23 * Z + A2 * X2  (mod 2^46).
    //--------------------------------------------------------------------
    t1 = r23 * (*x);
    x1 = (int) t1;
    x2 = *x - t23 * x1;
    t1 = a1 * x2 + a2 * x1;
    t2 = (int) (r23 * t1);
    z = t1 - t23 * t2;
    t3 = t23 * z + a2 * x2;
    t4 = (int) (r46 * t3) ;
    *x = t3 - t46 * t4;
    y[i] = r46 * (*x);
  }

  return;
}


// print_results prints the standard NPB completion report.  Note the
// parameter named `class` — this is valid C but would not compile as
// C++, so this file must be built as C.
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
                   double t, double mops, char *optype, int verified)
{
  char size[16];
  int j;

  printf( "\n\n %s Benchmark Completed.\n", name );
  printf( " Class           =             %12c\n", class );

  // If this is not a grid-based problem (EP, FT, CG), then
  // we only print n1, which contains some measure of the
  // problem size. In that case, n2 and n3 are both zero.
  // Otherwise, we print the grid size n1xn2xn3

  if ( ( n2 == 0 ) && ( n3 == 0 ) ) {
    if ( ( name[0] == 'E' ) && ( name[1] == 'P' ) ) {
      // EP reports 2^n1 points; strip a trailing ".0" if present
      sprintf( size, "%15.0lf", pow(2.0, n1) );
      j = 14;
      if ( size[j] == '.' ) {
        size[j] = ' ';
        j--;
      }
      size[j + 1] = '\0';
      printf( " Size            =          %15s\n", size );
    } else {
      printf( " Size            =             %12d\n", n1 );
    }
  } else {
    printf( " Size            =           %4dx%4dx%4d\n", n1, n2, n3 );
  }

  printf( " Iterations      =             %12d\n", niter );
  printf( " Time in seconds =             %12.2lf\n", t );
  printf( " Mop/s total     =          %15.2lf\n", mops );
  printf( " Operation type  = %24s\n", optype );
  if ( verified )
    printf( " Verification    =             %12s\n", "SUCCESSFUL" );
  else
    printf( " Verification    =             %12s\n", "UNSUCCESSFUL" );
}


// wtime returns wall-clock seconds since the first call (the first
// call latches tv_sec as the epoch, so results stay small and precise).
void wtime(double *t)
{
  static int sec = -1;
  struct timeval tv;
  gettimeofday(&tv, (void *)0);
  if (sec < 0) sec = tv.tv_sec;
  *t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec;
}


/*****************************************************************/
/******            E  L  A  P  S  E  D  _  T  I  M  E          ******/
/*****************************************************************/
double elapsed_time( void )
{
  double t;

  wtime( &t );
  return ( t );
}


/*****************************************************************/
/******            T  I  M  E  R  _  C  L  E  A  R             ******/
/*****************************************************************/
// elapsed[] and start[] are timer-state arrays declared elsewhere in
// this file — not visible in this chunk, TODO confirm their extent
// covers every timer index n used by callers.
void timer_clear( int n )
{
  elapsed[n] = 0.0;
}


/*****************************************************************/
/******            T  I  M  E  R  _  S  T  A  R  T             ******/
/*****************************************************************/
void timer_start( int n )
{
  start[n] = elapsed_time();
}


/*****************************************************************/
/******            T  I  M  E  R  _  S  T  O  P                ******/
/*****************************************************************/
void timer_stop( int n )
{
  double t, now;

  now = elapsed_time();
  t = now - start[n];
  elapsed[n] += t;
}


/*****************************************************************/
/******            T  I  M  E  R  _  R  E  A  D                ******/
/*****************************************************************/
double timer_read( int n )
{
  return ( elapsed[n] );
}
GB_unaryop__abs_int32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change here belongs in the code generator, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int32_uint64
// op(A') function:  GB_tran__abs_int32_uint64

// C type:   int32_t
// A type:   uint64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
// NOTE(review): the uint64 value is truncated to int32 here; presumably the
// generator guarantees this pairing is well-defined — confirm in GB_IABS docs.
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = GB_IABS ((int32_t) Ax [p]) for p = 0..anz-1, in parallel.
GrB_Info GB_unop__abs_int32_uint64
(
    int32_t *Cx,        // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the included template (GB_unaryop_transpose.c),
// specialized by the macros above.
GrB_Info GB_tran__abs_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_AxB_flopcount.c
//------------------------------------------------------------------------------ // GB_AxB_flopcount: compute flops for C<M>=A*B or C=A*B //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // On input, A and B are two matrices for C<M>=A*B or C=A*B. The flop count // for each B(:,j) is computed, and returned as a cumulative sum. This // function is CSR/CSC agnostic, but for simplicity of this description, assume // A and B are both CSC matrices, so that ncols(A) == nrows(B). For both CSR // and CSC, A->vdim == B->vlen holds. A and/or B may be hypersparse, in any // combination. // The complemented mask is not handled, so the flops for C<!M>=A*B is not // computed. // If present, Bflops has size (B->nvec)+1, for both standard and hypersparse // B. Let n = B->vdim be the column dimension of B (that is, B is m-by-n). // If B is a standard CSC matrix then Bflops has size n+1 == B->nvec+1, and on // output, Bflops [j] is the # of flops required to compute C (:, 0:j-1). B->h // is NULL, and is implicitly the vector 0:(n-1). // If B is hypersparse, then let Bh = B->h. Its size is B->nvec, and j = Bh // [kk] is the (kk)th column in the data structure for B. C will also be // hypersparse, and only C(:,Bh) will be computed (C may have fewer non-empty // columns than B). On output, Bflops [kk] is the number of needed flops to // compute C (:, Bh [0:kk-1]). // In both cases, Bflops [0] = 0, and Bflops [B->nvec] = total number of flops. // The size of Bflops is B->nvec+1 so that it has the same size as B->p. The // first entry of B->p and Bflops are both zero. This allows B to be sliced // either by # of entries in B (by slicing B->p) or by the flop count required // (by slicing Bflops). 
// This algorithm does not look at the values of M, A, or B, just their // patterns. If the mask is present, it is assumed to not be complemented. // The flop count of C=A*B or C<M>=A*B is computed for a saxpy-based method; // the work for A'*B for the dot product method is not computed. // The algorithm scans all nonzeros in B. It only scans at most the min and // max (first and last) row indices in A and M (if M is present). If A and M // are not hypersparse, the time taken is O(nnz(B)+n). If all matrices are // hypersparse, the time is O(nnz(B)*log(h)) where h = max # of vectors present // in A and M. In pseudo-MATLAB, and assuming B is in standard (not // hypersparse) form: /* [m n] = size (B) ; Bflops = zeros (1,n+1) ; % (set to zero in the caller) for each column j in B: if (B (:,j) is empty) continue ; if (M is present and M (:,j) is empty) continue ; im_first = min row index of nonzeros in M(:,j) im_last = max row index of nonzeros in M(:,j) for each k where B (k,j) is nonzero: aknz = nnz (A (:,k)) if (aknz == 0) continue ; alo = min row index of nonzeros in A(:,k) ahi = max row index of nonzeros in A(:,k) if (M is present) if (intersection (alo:ahi, im_first:im_last) empty) continue end % numerical phase will compute: C(:,j)<M(:,j)> += A(:,k)*B(k,j), % which takes aknz flops, so: Bflops (j) += aknz Bflops_per_entry (k,j) = aknz end end */ // If Bflops and Bflops_per_entry are both NULL, then only the true/false // result of the test (total_flops <= floplimit) is returned. This allows the // function to return early, once the total_flops exceeds the threshold. 
#include "GB_mxm.h"
#include "GB_ek_slice.h"
#include "GB_bracket.h"

bool GB_AxB_flopcount           // compute flops for C<M>=A*B or C=A*B
(
    int64_t *Bflops,            // size B->nvec+1 and all zero, if present
    int64_t *Bflops_per_entry,  // size nnz(B)+1 and all zero, if present
    const GrB_Matrix M,         // optional mask matrix
    const GrB_Matrix A,
    const GrB_Matrix B,
    int64_t floplimit,          // maximum flops to compute if Bflops NULL
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_OK_OR_NULL (GB_check (M, "M for flop count A*B", GB0)) ;
    ASSERT_OK (GB_check (A, "A for flop count A*B", GB0)) ;
    ASSERT_OK (GB_check (B, "B for flop count A*B", GB0)) ;
    ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT (A->vdim == B->vlen) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t bnz = GB_NNZ (B) ;
    int64_t bnvec = B->nvec ;

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (bnz + bnvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // determine the kind of result to return
    //--------------------------------------------------------------------------

    // if neither output array is wanted, only the boolean test
    // (total_flops <= floplimit) is computed, allowing early exit
    bool check_quick_return = (Bflops == NULL) && (Bflops_per_entry == NULL) ;

    #ifdef GB_DEBUG
    if (Bflops != NULL)
    {
        // Bflops is set to zero in the caller
        for (int64_t kk = 0 ; kk <= bnvec ; kk++)
        {
            ASSERT (Bflops [kk] == 0) ;
        }
    }
    if (Bflops_per_entry != NULL)
    {
        // Bflops_per_entry is set to zero in the caller
        for (int64_t pB = 0 ; pB <= bnz ; pB++)
        {
            ASSERT (Bflops_per_entry [pB] == 0) ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // get the mask, if present
    //--------------------------------------------------------------------------

    const int64_t *restrict Mh = NULL ;
    const int64_t *restrict Mp = NULL ;
    const int64_t *restrict Mi = NULL ;
    int64_t mnvec = 0 ;
    bool M_is_hyper = GB_IS_HYPER (M) ;
    if (M != NULL)
    {
        Mh = M->h ;
        Mp = M->p ;
        Mi = M->i ;
        mnvec = M->nvec ;
    }

    //--------------------------------------------------------------------------
    // get A and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ai = A->i ;
    int64_t anvec = A->nvec ;
    bool A_is_hyper = GB_IS_HYPER (A) ;

    const int64_t *restrict Bh = B->h ;
    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bi = B->i ;
    bool B_is_hyper = GB_IS_HYPER (B) ;

    //--------------------------------------------------------------------------
    // construct the parallel tasks
    //--------------------------------------------------------------------------

    // Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1
    // and vectors kfirst_slice [tid] to klast_slice [tid].  The first and
    // last vectors may be shared with prior slices and subsequent slices.

    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
    ntasks = GB_IMIN (ntasks, bnz) ;
    ntasks = GB_IMAX (ntasks, 1) ;
    // NOTE(review): C99 VLAs on the stack; ntasks is bounded by 64*nthreads
    // above, which presumably keeps these small — confirm the stack budget.
    int64_t pstart_slice [ntasks+1] ;
    int64_t kfirst_slice [ntasks] ;
    int64_t klast_slice  [ntasks] ;

    GB_ek_slice (pstart_slice, kfirst_slice, klast_slice, B, ntasks) ;

    //--------------------------------------------------------------------------
    // compute flop counts for C<M> = A*B
    //--------------------------------------------------------------------------

    // Wfirst/Wlast hold partial sums for the (possibly shared) first and
    // last vector of each slice; they are reduced across tasks below.
    int64_t Wfirst [ntasks], Wlast [ntasks], Flops [ntasks+1] ;
    int64_t total_flops = 0 ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // skip this task if limit already reached
        //----------------------------------------------------------------------

        bool quick_return = false ;
        int64_t flops_so_far = 0 ;
        if (check_quick_return)
        {
            {
                // atomic read of the shared running total
                #pragma omp atomic read
                flops_so_far = total_flops ;
            }
            if (flops_so_far > floplimit) continue ;
        }

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;
        int64_t task_flops = 0 ;
        Wfirst [tid] = 0 ;
        Wlast  [tid] = 0 ;
        int64_t mpleft = 0 ;    // for GB_lookup of the mask M

        //----------------------------------------------------------------------
        // count flops for vectors kfirst to klast of B
        //----------------------------------------------------------------------

        for (int64_t kk = kfirst ; !quick_return && (kk <= klast) ; kk++)
        {

            //------------------------------------------------------------------
            // find the part of B(:,j) to be computed by this task
            //------------------------------------------------------------------

            int64_t pB, pB_end ;
            GB_get_pA_and_pC (&pB, &pB_end, NULL,
                tid, kk, kfirst, klast, pstart_slice, NULL, NULL, Bp) ;

            int64_t j = (B_is_hyper) ? Bh [kk] : kk ;

            // C(:,j) is empty if B(:,j) is empty
            int64_t bjnz = pB_end - pB ;
            if (bjnz == 0) continue ;

            //------------------------------------------------------------------
            // see if M(:,j) is present and non-empty
            //------------------------------------------------------------------

            int64_t im_first = -1, im_last = -1 ;
            if (M != NULL)
            {
                int64_t mpright = mnvec - 1 ;
                int64_t pM, pM_end ;
                GB_lookup (M_is_hyper, Mh, Mp, &mpleft, mpright, j,
                    &pM, &pM_end) ;
                int64_t mjnz = pM_end - pM ;
                // C(:,j) is empty if M(:,j) is empty
                if (mjnz == 0) continue ;
                // M(:,j) has at least 1 entry; get 1st and last index in M(:,j)
                im_first = Mi [pM] ;
                im_last  = Mi [pM_end-1] ;
            }

            //------------------------------------------------------------------
            // trim Ah on right
            //------------------------------------------------------------------

            // Ah [0..A->nvec-1] holds the set of non-empty vectors of A, but
            // only vectors k corresponding to nonzero entries B(k,j) are
            // accessed for this vector B(:,j).  If nnz (B(:,j)) > 2, prune the
            // search space on the right, so the remaining calls to GB_lookup
            // will only need to search Ah [pleft...pright-1].  pright does not
            // change.  pleft is advanced as B(:,j) is traversed, since the
            // indices in B(:,j) are sorted in ascending order.

            int64_t pleft = 0 ;
            int64_t pright = anvec-1 ;
            if (A_is_hyper && bjnz > 2)
            {
                // trim Ah [0..pright] to remove any entries past last B(:,j)
                GB_bracket_right (Bi [pB_end-1], Ah, 0, &pright) ;
            }

            //------------------------------------------------------------------
            // count the flops to compute C(:,j)<M(:,j)> = A*B(:,j)
            //------------------------------------------------------------------

            int64_t bjflops = 0 ;

            for ( ; pB < pB_end ; pB++)
            {
                // B(k,j) is nonzero
                int64_t k = Bi [pB] ;

                // find A(:,k), reusing pleft since Bi [...] is sorted
                int64_t pA, pA_end ;
                GB_lookup (A_is_hyper, Ah, Ap, &pleft, pright, k,
                    &pA, &pA_end);

                // skip if A(:,k) empty
                int64_t aknz = pA_end - pA ;
                if (aknz == 0) continue ;

                // skip if intersection of A(:,k) and M(:,j) is empty
                if (M != NULL)
                {
                    // A(:,k) is non-empty; get first and last index of A(:,k)
                    int64_t alo = Ai [pA] ;
                    int64_t ahi = Ai [pA_end-1] ;
                    if (ahi < im_first || alo > im_last) continue ;
                }

                // increment by flops for the single entry B(k,j)
                // C(:,j)<M(:,j)> += A(:,k)*B(k,j).
                bjflops += aknz ;

                if (Bflops_per_entry != NULL)
                {
                    // flops for the single entry, B(k,j)
                    Bflops_per_entry [pB] = aknz ;
                }

                // check for a quick return
                if (check_quick_return)
                {
                    flops_so_far += aknz ;
                    if (flops_so_far > floplimit)
                    {
                        // flop limit has been reached; terminate this and all
                        // other tasks
                        quick_return = true ;
                        break ;
                    }
                }
            }

            //------------------------------------------------------------------
            // sum up the flops for this task
            //------------------------------------------------------------------

            task_flops += bjflops ;

            //------------------------------------------------------------------
            // log the flops for B(:,j)
            //------------------------------------------------------------------

            // first/last vectors may be shared with other tasks, so their
            // counts go into Wfirst/Wlast and are reduced after the loop
            if (Bflops != NULL)
            {
                if (kk == kfirst)
                {
                    Wfirst [tid] = bjflops ;
                }
                else if (kk == klast)
                {
                    Wlast [tid] = bjflops ;
                }
                else
                {
                    Bflops [kk] = bjflops ;
                }
            }
        }

        //----------------------------------------------------------------------
        // log the flops for this task
        //----------------------------------------------------------------------

        Flops [tid] = task_flops ;
        if (check_quick_return)
        {
            #pragma omp atomic update
            total_flops += task_flops ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the results
    //--------------------------------------------------------------------------

    bool result ;

    if (check_quick_return)
    {
        // The only output of this function is the result of this test:
        result = (total_flops <= floplimit) ;
    }
    else
    {

        //----------------------------------------------------------------------
        // cumulative sum of Bflops and Bflops_per_entry
        //----------------------------------------------------------------------

        GB_cumsum (Flops, ntasks, NULL, 1) ;
        int64_t total_flops = Flops [ntasks] ;
        result = (total_flops <= floplimit) ;

        if (Bflops != NULL)
        {

            //------------------------------------------------------------------
            // reduce the first and last vector of each slice
            //------------------------------------------------------------------

            // See also Template/GB_reduce_each_vector.c

            int64_t kprior = -1 ;

            for (int tid = 0 ; tid < ntasks ; tid++)
            {

                //--------------------------------------------------------------
                // sum up the partial flops that task tid computed for kfirst
                //--------------------------------------------------------------

                int64_t kfirst = kfirst_slice [tid] ;
                int64_t klast  = klast_slice  [tid] ;

                if (kfirst <= klast)
                {
                    int64_t pB = pstart_slice [tid] ;
                    int64_t pB_end =
                        GB_IMIN (Bp [kfirst+1], pstart_slice [tid+1]) ;
                    if (pB < pB_end)
                    {
                        if (kprior < kfirst)
                        {
                            // This task is the first one that did work on
                            // B(:,kfirst), so use it to start the reduction.
                            Bflops [kfirst] = Wfirst [tid] ;
                        }
                        else
                        {
                            // subsequent task for B(:,kfirst)
                            Bflops [kfirst] += Wfirst [tid] ;
                        }
                        kprior = kfirst ;
                    }
                }

                //--------------------------------------------------------------
                // sum up the partial flops that task tid computed for klast
                //--------------------------------------------------------------

                if (kfirst < klast)
                {
                    int64_t pB = Bp [klast] ;
                    int64_t pB_end = pstart_slice [tid+1] ;
                    if (pB < pB_end)
                    {
                        /* if */ ASSERT (kprior < klast) ;
                        {
                            // This task is the first one that did work on
                            // B(:,klast), so use it to start the reduction.
                            Bflops [klast] = Wlast [tid] ;
                        }
                        /*
                        else
                        {
                            // If kfirst < klast and B(:,klast) is not empty,
                            // then this task is always the first one to do
                            // work on B(:,klast), so this case is never used.
                            ASSERT (GB_DEAD_CODE) ;
                            // subsequent task to work on B(:,klast)
                            Bflops [klast] += Wlast [tid] ;
                        }
                        */
                        kprior = klast ;
                    }
                }
            }

            //------------------------------------------------------------------
            // cumulative sum of Bflops
            //------------------------------------------------------------------

            // Bflops = cumsum ([0 Bflops]) ;
            ASSERT (Bflops [bnvec] == 0) ;
            GB_cumsum (Bflops, bnvec, NULL, nthreads) ;
            // Bflops [bnvec] is now the total flop count
            ASSERT (total_flops == Bflops [bnvec]) ;
        }

        if (Bflops_per_entry != NULL)
        {
            // Bflops_per_entry = cumsum ([0 Bflops_per_entry]) ;
            ASSERT (Bflops_per_entry [bnz] == 0) ;
            GB_cumsum (Bflops_per_entry, bnz, NULL, nthreads) ;
            // Bflops_per_entry [bnz] is now the total flop count
            ASSERT (total_flops == Bflops_per_entry [bnz]) ;
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (result) ;
}
GB_binop__eq_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change here belongs in the code generator, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_uint64)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_03__eq_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_uint64)
// A*D function (colscale):         GB (_AxD__eq_uint64)
// D*A function (rowscale):         GB (_DxB__eq_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_uint64)
// C=scalar+B                       GB (_bind1st__eq_uint64)
// C=scalar+B'                      GB (_bind1st_tran__eq_uint64)
// C=A+scalar                       GB (_bind2nd__eq_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__eq_uint64)

// C type:   bool
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_UINT64 || GxB_NO_EQ_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (EQ is none of these, so this kernel is compiled out for this operator.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// NOTE(review): the template include is under "#if 0", so this function is a
// no-op that returns GrB_SUCCESS — presumably intended by the generator
// because EQ is not a supported accum op here; confirm against the generator.
GrB_Info GB (_Cdense_accumB__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// NOTE(review): likewise compiled out via "#if 0" — no-op success.
GrB_Info GB (_Cdense_accumb__eq_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__eq_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
example-omp.c
// PWD010: Incorrect sharing in parallel region
// https://www.appentra.com/knowledge/checks/pwd010
//
// Fixed: the inner loop variable was declared before the parallel region,
// making it implicitly shared — every thread raced on the same `j`.
// Declaring the loop variables in the for-init makes them private to each
// thread (OpenMP predetermines variables declared inside the construct as
// private), and using `unsigned` also removes the signed/unsigned
// comparison against `rows`/`cols`.

// Zero every element of the rows x cols matrix, rows distributed
// across threads. `result` must point to at least `rows` row pointers,
// each addressing at least `cols` ints.
void example(int **result, unsigned rows, unsigned cols) {
  #pragma omp parallel for shared(result)
  for (unsigned i = 0; i < rows; i++) {
    // `j` is declared inside the parallel loop body: private per thread.
    for (unsigned j = 0; j < cols; j++) {
      result[i][j] = 0;
    }
  }
}
op_openmp4_rt_support.c
//
// header files
//

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <omp.h>

#include <op_lib_c.h>
#include <op_lib_core.h>
#include <op_rt_support.h>

//
// routines to move arrays to/from GPU device
//

// Map an existing host buffer of `size` bytes onto the OpenMP target device
// and copy its contents there. Unlike the CUDA backend this does not
// allocate a separate device buffer: the same host pointer is used on both
// sides (unified by the `target enter data` mapping), so *map is left
// unchanged. No-op when no device backend is active.
void op_mvHostToDevice(void **map, int size) {
  if (!OP_hybrid_gpu)
    return;
  char *temp = (char*)*map;
  #pragma omp target enter data map(to: temp[:size])
  #pragma omp target update to(temp[:size]) // TODO: test
}

// Allocate a fresh host-side shadow buffer for *data_h, copy the data into
// it, and map/upload that buffer to the device. On return *data_d owns the
// new buffer (caller frees it, see op_cuda_exit).
void op_cpHostToDevice(void **data_d, void **data_h, int size) {
  if (!OP_hybrid_gpu)
    return;
  *data_d = (char*)op_malloc(size);
  memcpy(*data_d, *data_h, size);
  char *tmp = (char *)*data_d;
  // TODO: is this right? the `enter data` is presumably needed because of
  // the declaration (establish the mapping before the update) — verify.
  #pragma omp target enter data map(to: tmp[:size])
  #pragma omp target update to(tmp[:size])
}

// Thin wrapper: build an execution plan with the default staging mode.
op_plan *op_plan_get(char const *name, op_set set, int part_size, int nargs,
                     op_arg *args, int ninds, int *inds) {
  return op_plan_get_stage(name, set, part_size, nargs, args, ninds, inds,
                           OP_STAGE_ALL);
}

// Thin wrapper: build an execution plan and always upload it to the device.
op_plan *op_plan_get_stage(char const *name, op_set set, int part_size,
                           int nargs, op_arg *args, int ninds, int *inds,
                           int staging) {
  return op_plan_get_stage_upload(name, set, part_size, nargs, args, ninds,
                                  inds, staging, 1);
}

// Build (or fetch a cached) execution plan via op_plan_core and, on first
// construction (plan->count == 1) with an active device and upload enabled,
// push the plan's index/location arrays to the device and re-point the
// per-argument sub-array pointers into the device-mapped base arrays.
op_plan *op_plan_get_stage_upload(char const *name, op_set set, int part_size,
                                  int nargs, op_arg *args, int ninds,
                                  int *inds, int staging, int upload) {
  op_plan *plan =
      op_plan_core(name, set, part_size, nargs, args, ninds, inds, staging);
  if (!OP_hybrid_gpu || !upload)
    return plan;

  // Any indirectly accessed, non-read argument means halo (exec) elements
  // are touched too, so size the staging arrays over set + exec elements.
  int set_size = set->size;
  for (int i = 0; i < nargs; i++) {
    if (args[i].idx != -1 && args[i].acc != OP_READ) {
      set_size += set->exec_size;
      break;
    }
  }

  // Only upload the first time this plan is produced.
  if (plan->count == 1) {
    // Prefix-sum of how many arguments share each staged indirect dataset,
    // used to slice ind_map into per-dataset sub-maps.
    int *offsets = (int *)malloc((plan->ninds_staged + 1) * sizeof(int));
    offsets[0] = 0;
    for (int m = 0; m < plan->ninds_staged; m++) {
      int count = 0;
      for (int m2 = 0; m2 < nargs; m2++)
        if (plan->inds_staged[m2] == m)
          count++;
      offsets[m + 1] = offsets[m] + count;
    }
    op_mvHostToDevice((void **)&(plan->ind_map),
                      offsets[plan->ninds_staged] * set_size * sizeof(int));
    for (int m = 0; m < plan->ninds_staged; m++) {
      // Sub-map pointers index into the (now device-mapped) base array.
      plan->ind_maps[m] = &plan->ind_map[set_size * offsets[m]];
    }
    free(offsets);

    // Same slicing scheme for the per-argument local (thread-color) maps.
    int counter = 0;
    for (int m = 0; m < nargs; m++)
      if (plan->loc_maps[m] != NULL)
        counter++;
    op_mvHostToDevice((void **)&(plan->loc_map),
                      sizeof(short) * counter * set_size);
    counter = 0;
    for (int m = 0; m < nargs; m++)
      if (plan->loc_maps[m] != NULL) {
        plan->loc_maps[m] = &plan->loc_map[set_size * counter];
        counter++;
      }

    // Remaining plan arrays: sizes, offsets, coloring and block metadata.
    op_mvHostToDevice((void **)&(plan->ind_sizes),
                      sizeof(int) * plan->nblocks * plan->ninds_staged);
    op_mvHostToDevice((void **)&(plan->ind_offs),
                      sizeof(int) * plan->nblocks * plan->ninds_staged);
    op_mvHostToDevice((void **)&(plan->nthrcol), sizeof(int) * plan->nblocks);
    op_mvHostToDevice((void **)&(plan->thrcol), sizeof(int) * set_size);
    op_mvHostToDevice((void **)&(plan->col_reord), sizeof(int) * set_size);
    op_mvHostToDevice((void **)&(plan->offset), sizeof(int) * plan->nblocks);
    plan->offset_d = plan->offset;   // device alias == host pointer here
    op_mvHostToDevice((void **)&(plan->nelems), sizeof(int) * plan->nblocks);
    plan->nelems_d = plan->nelems;
    op_mvHostToDevice((void **)&(plan->blkmap), sizeof(int) * plan->nblocks);
    plan->blkmap_d = plan->blkmap;
  }
  return plan;
}

// Tear down: unmap and free every dataset's device shadow buffer.
void op_cuda_exit() {
  if (!OP_hybrid_gpu)
    return;
  op_dat_entry *item;
  TAILQ_FOREACH(item, &OP_dat_list, entries) {
    // NOTE(review): this maps the pointer variable `data_d` itself
    // (sizeof(char*) bytes), not the `data_d[:n]` array section the other
    // routines map — confirm this is the intended unmapping.
    #pragma omp target exit data map(from: (item->dat)->data_d)
    free((item->dat)->data_d);
  }
  /*
  for (int ip = 0; ip < OP_plan_index; ip++) {
    OP_plans[ip].ind_map = NULL;
    OP_plans[ip].loc_map = NULL;
    OP_plans[ip].ind_sizes = NULL;
    OP_plans[ip].ind_offs = NULL;
    OP_plans[ip].nthrcol = NULL;
    OP_plans[ip].thrcol = NULL;
    OP_plans[ip].col_reord = NULL;
    OP_plans[ip].offset = NULL;
    OP_plans[ip].nelems = NULL;
    OP_plans[ip].blkmap = NULL;
  }
  */
  // cudaDeviceReset ( );
}

//
// routines to resize constant/reduct arrays, if necessary
//

// The OpenMP4 backend keeps constants/reductions in host memory, so these
// CUDA-backend hooks are intentional no-ops.
void reallocConstArrays(int consts_bytes) { (void) consts_bytes; }

void reallocReductArrays(int reduct_bytes) { (void) reduct_bytes; }

//
// routines to move constant/reduct arrays
//

// Intentional no-ops for the same reason as the realloc hooks above.
void mvConstArraysToDevice(int consts_bytes) { (void) consts_bytes; }

void mvReductArraysToDevice(int reduct_bytes) { (void) reduct_bytes; }

void mvReductArraysToHost(int reduct_bytes) { (void) reduct_bytes; }

//
// routine to fetch data from GPU to CPU (with transposing SoA to AoS if
// needed)
//

// Bring a dataset back to the host if (and only if) the device copy is the
// dirty one (dirty_hd == 2), clearing the dirty flag. SoA-laid-out data is
// transposed back to AoS while copying into dat->data.
void op_cuda_get_data(op_dat dat) {
  if (!OP_hybrid_gpu)
    return;
  if (dat->dirty_hd == 2)
    dat->dirty_hd = 0;
  else
    return;   // host copy already up to date
  #pragma omp target update from(dat->data_d[:dat->size * dat->set->size])
  // transpose data (device buffer is SoA: component-major)
  if (strstr(dat->type, ":soa") != NULL || (OP_auto_soa && dat->dim > 1)) {
    int element_size = dat->size / dat->dim;
    for (int i = 0; i < dat->dim; i++) {
      for (int j = 0; j < dat->set->size; j++) {
        for (int c = 0; c < element_size; c++) {
          dat->data[dat->size * j + element_size * i + c] =
              dat->data_d[element_size * i * dat->set->size +
                          element_size * j + c];
        }
      }
    }
  } else {
    memcpy(dat->data,dat->data_d,dat->size * dat->set->size);
  }
}

// No explicit device synchronisation needed in this backend.
void deviceSync() {
  // cutilSafeCall(cudaDeviceSynchronize());
}

#ifndef OPMPI

// Initialise the device backend: touch the device once so the OpenMP
// runtime spins up, then mark the hybrid-GPU path as active.
void cutilDeviceInit(int argc, char **argv) {
  (void)argc;
  (void)argv;
  // copy one scalar to initialize OpenMP env.
  // Improvement: later we can set default device.
  int tmp=0;
  #pragma omp target enter data map(to:tmp)
  OP_hybrid_gpu = 1;
}

// Push the host copy of a dataset to the device shadow buffer,
// transposing AoS -> SoA first when the dataset is SoA on the device.
void op_upload_dat(op_dat dat) {
  if (!OP_hybrid_gpu)
    return;
  int set_size = dat->set->size;
  if (strstr(dat->type, ":soa") != NULL || (OP_auto_soa && dat->dim > 1)) {
    int element_size = dat->size / dat->dim;
    for (int i = 0; i < dat->dim; i++) {
      for (int j = 0; j < set_size; j++) {
        for (int c = 0; c < element_size; c++) {
          dat->data_d[element_size * i * set_size + element_size * j + c] =
              dat->data[dat->size * j + element_size * i + c];
        }
      }
    }
  } else {
    memcpy(dat->data_d,dat->data,dat->size * dat->set->size);
  }
  #pragma omp target update to(dat->data_d[:set_size*dat->size])
}

// Pull the device copy of a dataset back to the host, transposing
// SoA -> AoS when needed. Does not touch the dirty flags.
void op_download_dat(op_dat dat) {
  if (!OP_hybrid_gpu)
    return;
  #pragma omp target update from(dat->data_d[:dat->size * dat->set->size])
  int set_size = dat->set->size;
  if (strstr(dat->type, ":soa") != NULL || (OP_auto_soa && dat->dim > 1)) {
    int element_size = dat->size / dat->dim;
    for (int i = 0; i < dat->dim; i++) {
      for (int j = 0; j < set_size; j++) {
        for (int c = 0; c < element_size; c++) {
          dat->data[dat->size * j + element_size * i + c] =
              dat->data_d[element_size * i * set_size + element_size * j + c];
        }
      }
    }
  } else {
    memcpy(dat->data,dat->data_d,dat->size * dat->set->size);
  }
}

// Single-process stand-in for the MPI halo exchange, host-side loops:
// make sure every device-dirty dataset (dirty_hd == 2) is downloaded
// before the host kernel reads it.
// TODO: here the download + dirty-flag reset is equivalent to a single
// op_cuda_get_data call.
int op_mpi_halo_exchanges(op_set set, int nargs, op_arg *args) {
  for (int n = 0; n < nargs; n++)
    if (args[n].opt && args[n].argtype == OP_ARG_DAT &&
        args[n].dat->dirty_hd == 2) {
      op_download_dat(args[n].dat);
      args[n].dat->dirty_hd = 0;
    }
  return set->size;
}

// After a host-side kernel: mark every written dataset as host-dirty (1).
void op_mpi_set_dirtybit(int nargs, op_arg *args) {
  for (int n = 0; n < nargs; n++) {
    if ((args[n].opt == 1) && (args[n].argtype == OP_ARG_DAT) &&
        (args[n].acc == OP_INC || args[n].acc == OP_WRITE ||
         args[n].acc == OP_RW)) {
      args[n].dat->dirty_hd = 1;
    }
  }
}

// Device-side counterpart of op_mpi_halo_exchanges: upload every
// host-dirty dataset (dirty_hd == 1) before the device kernel reads it.
int op_mpi_halo_exchanges_cuda(op_set set, int nargs, op_arg *args) {
  for (int n = 0; n < nargs; n++)
    if (args[n].opt && args[n].argtype == OP_ARG_DAT &&
        args[n].dat->dirty_hd == 1) {
      op_upload_dat(args[n].dat);
      args[n].dat->dirty_hd = 0;
    }
  return set->size;
}

// After a device-side kernel: mark every written dataset as device-dirty (2).
void op_mpi_set_dirtybit_cuda(int nargs, op_arg *args) {
  for (int n = 0; n < nargs; n++) {
    if ((args[n].opt == 1) && (args[n].argtype == OP_ARG_DAT) &&
        (args[n].acc == OP_INC || args[n].acc == OP_WRITE ||
         args[n].acc == OP_RW)) {
      args[n].dat->dirty_hd = 2;
    }
  }
}

// The remaining MPI hooks are no-ops in this single-process backend.
void op_mpi_wait_all(int nargs, op_arg *args) {
  (void)nargs;
  (void)args;
}

void op_mpi_wait_all_cuda(int nargs, op_arg *args) {
  (void)nargs;
  (void)args;
}

void op_mpi_reset_halos(int nargs, op_arg *args) {
  (void)nargs;
  (void)args;
}

void op_mpi_barrier() {}

// Performance timing hook: returns the name as an opaque handle.
void *op_mpi_perf_time(const char *name, double time) {
  (void)name;
  (void)time;
  return (void *)name;
}

#ifdef COMM_PERF
void op_mpi_perf_comms(void *k_i, int nargs, op_arg *args) {
  (void)k_i;
  (void)nargs;
  (void)args;
}
#endif

// Global reductions are trivial with one process: nothing to combine.
void op_mpi_reduce_float(op_arg *args, float *data) {
  (void)args;
  (void)data;
}

void op_mpi_reduce_double(op_arg *args, double *data) {
  (void)args;
  (void)data;
}

void op_mpi_reduce_int(op_arg *args, int *data) {
  (void)args;
  (void)data;
}

void op_mpi_reduce_bool(op_arg *args, bool *data) {
  (void)args;
  (void)data;
}

// Partitioning is meaningless without MPI: no-op.
void op_partition(const char *lib_name, const char *lib_routine,
                  op_set prime_set, op_map prime_map, op_dat coords) {
  (void)lib_name;
  (void)lib_routine;
  (void)prime_set;
  (void)prime_map;
  (void)coords;
}

void op_partition_reverse() {}

// First/second moments of a single timing sample.
void op_compute_moment(double t, double *first, double *second) {
  *first = t;
  *second = t * t;
}

// Mean and mean-square over `ntimes` samples, optionally skipping zeros
// (e.g. timers that never fired). Both moments are 0 when no sample counts.
void op_compute_moment_across_times(double* times, int ntimes,
                                    bool ignore_zeros, double *first,
                                    double *second) {
  *first = 0.0;
  *second = 0.0f;
  int n = 0;
  for (int i=0; i<ntimes; i++) {
    if (ignore_zeros && (times[i] == 0.0f)) {
      continue;
    }
    *first += times[i];
    *second += times[i] * times[i];
    n++;
  }
  if (n != 0) {
    *first /= (double)n;
    *second /= (double)n;
  }
}

// Every process is "root" when there is only one process.
int op_is_root() { return 1; }
#endif
operators.h
/* Project Name : OpenMEEG © INRIA and ENPC (contributors: Geoffray ADDE, Maureen CLERC, Alexandre GRAMFORT, Renaud KERIVEN, Jan KYBIC, Perrine LANDREAU, Théodore PAPADOPOULO, Emmanuel OLIVI Maureen.Clerc.AT.inria.fr, keriven.AT.certis.enpc.fr, kybic.AT.fel.cvut.cz, papadop.AT.inria.fr) The OpenMEEG software is a C++ package for solving the forward/inverse problems of electroencephalography and magnetoencephalography. This software is governed by the CeCILL-B license under French law and abiding by the rules of distribution of free software. You can use, modify and/ or redistribute the software under the terms of the CeCILL-B license as circulated by CEA, CNRS and INRIA at the following URL "http://www.cecill.info". As a counterpart to the access to the source code and rights to copy, modify and redistribute granted by the license, users are provided only with a limited warranty and the software's authors, the holders of the economic rights, and the successive licensors have only limited liability. In this respect, the user's attention is drawn to the risks associated with loading, using, modifying and/or developing or reproducing the software by the user in light of its specific status of free software, that may mean that it is complicated to manipulate, and that also therefore means that it is reserved for developers and experienced professionals having in-depth computer knowledge. Users are therefore encouraged to load and test the software's suitability as regards their requirements in conditions enabling the security of their systems and/or data to be ensured and, more generally, to use and operate it in the same conditions as regards security. The fact that you are presently reading this means that you have had knowledge of the CeCILL-B license and that you accept its terms. */ /// \file /// \brief File containing the integral operators. 
#pragma once #include <iostream> #include <vector.h> #include <matrix.h> #include <symmatrix.h> #include <sparse_matrix.h> #include <geometry.h> #include <integrator.h> #include <analytics.h> #include <progressbar.h> namespace OpenMEEG { // TODO: Use overloading and remove the internal suffix. void operatorSinternal(const Mesh&,Matrix&,const Vertices&,const double&); void operatorDinternal(const Mesh&,Matrix&,const Vertices&,const double&); void operatorFerguson(const Vect3&,const Mesh&,Matrix&,const unsigned&,const double&); void operatorDipolePotDer(const Vect3&,const Vect3&,const Mesh&,Vector&,const double&,const unsigned,const bool); void operatorDipolePot(const Vect3&,const Vect3&,const Mesh&,Vector&,const double&,const unsigned,const bool); namespace Details { // #define ADAPT_LHS template <template <typename,typename> class Integrator> void operatorDipolePot(const Vect3& r0,const Vect3& q,const Mesh& m,Vector& rhs,const double& coeff,const unsigned gauss_order) { static analyticDipPot anaDP; anaDP.init(q,r0); Integrator<double,analyticDipPot> gauss(0.001); gauss->setOrder(gauss_order); #pragma omp parallel for #if defined NO_OPENMP || defined OPENMP_RANGEFOR for (const auto& triangle : m.triangles()) { #elif defined OPENMP_ITERATOR for (Triangles::const_iterator tit=m.triangles().begin();tit<m.triangles().end();++tit) { const Triangle& triangle = *tit; #else for (int i=0;i<m.triangles().size();++i) { const Triangle& triangle = *(m.triangles().begin()+i); #endif const double d = gauss->integrate(anaDP,triangle); #pragma omp critical rhs(triangle.index()) += d*coeff; } } // T can be a Matrix or SymMatrix template <typename T> inline void operatorD(const Triangle& T1,const Triangle& T2,T& mat,const double& coeff,const unsigned gauss_order) { //this version of operatorD add in the Matrix the contribution of T2 on T1 // for all the P1 functions it gets involved // consider varying order of quadrature with the distance between T1 and T2 analyticD3 analyD(T2); 
#ifdef ADAPT_LHS AdaptiveIntegrator<Vect3, analyticD3> gauss(0.005); gauss.setOrder(gauss_order); #else STATIC_OMP Integrator<Vect3, analyticD3> gauss(gauss_order); #endif const Vect3 total = gauss.integrate(analyD,T1); for (unsigned i=0; i<3; ++i) mat(T1.index(),T2.vertex(i).index()) += total(i)*coeff; } inline double operatorS(const analyticS& analyS,const Triangle& T2,const unsigned gauss_order) { #ifdef ADAPT_LHS AdaptiveIntegrator<double,analyticS> gauss(0.005); #else STATIC_OMP Integrator<double,analyticS> gauss; #endif gauss.setOrder(gauss_order); return gauss.integrate(analyS,T2); } template <typename T> inline double operatorN(const Vertex& V1,const Vertex& V2,const Mesh& m1,const Mesh& m2,const T& mat) { const bool same_shared_vertex = ((&m1!=&m2) && (V1==V2)); const double factor = (same_shared_vertex) ? 0.5 : 0.25; double result = 0.0; for (const auto& tp1 : m1.triangles(V1)) { const Edge& edge1 = tp1->edge(V1); const Vect3& CB1 = edge1.vertex(0)-edge1.vertex(1); const unsigned ind1 = tp1->index()-m1.triangles().front().index(); for (const auto& tp2 : m2.triangles(V2)) { const unsigned ind2 = tp2->index()-m2.triangles().front().index(); // In the second case, we here divided (precalculated) operatorS by the product of areas. const double Iqr = (m1.current_barrier() || m2.current_barrier()) ? 
mat(ind1,ind2) : mat(tp1->index(),tp2->index())/(tp1->area()*tp2->area()); const Edge& edge2 = tp2->edge(V2); const Vect3& CB2 = edge2.vertex(0)-edge2.vertex(1); result -= factor*Iqr*dotprod(CB1,CB2); } } return result; } inline Vect3 operatorFerguson(const Vect3& x,const Vertex& V,const Mesh& m) { Vect3 result; result = 0.0; // Loop over triangles of which V is a vertex for (const auto& tp : m.triangles(V)) { const Triangle& T = *tp; const Edge& edge = T.edge(V); // A, B are the two opposite vertices to V (triangle A, B, V) const Vertex& A = edge.vertex(0); const Vertex& B = edge.vertex(1); const Vect3 AB = (A-B)*(0.5/T.area()); analyticS analyS(V,A,B); const double opS = analyS.f(x); result += (AB*opS); } return result; } inline double operatorP1P0(const Triangle& T2,const Vertex& V1) { double result = 0.; if (T2.contains(V1)) result = T2.area()/3.0; return result; } template <typename T> void operatorD(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) { // This function (OPTIMIZED VERSION) has the following arguments: // the 2 interacting meshes // the storage Matrix for the result // the coefficient to be appleid to each matrix element (depending on conductivities, ...) 
// the gauss order parameter (for adaptive integration) // In this version of the function, in order to skip multiple computations of the same quantities // loops are run over the triangles but the Matrix cannot be filled in this function anymore // That's why the filling is done is function Details::operatorD // ProgressBar pb(m1.triangles().size()); const Triangles& m1_triangles = m1.triangles(); #pragma omp parallel for #if defined NO_OPENMP || defined OPENMP_RANGEFOR for (const auto& triangle1 : m1_triangles) { #elif defined OPENMP_ITERATOR for (Triangles::const_iterator tit1=m1_triangles.begin();tit1<m1_triangles.end();++tit1) { const Triangle& triangle1 = *tit1; #else for (int i1=0; i1 < m1_triangles.size(); ++i1) { const Triangle& triangle1 = *(m1_triangles.begin()+i1); #endif for (const auto& triangle2 : m2.triangles()) Details::operatorD(triangle1,triangle2,mat,coeff,gauss_order); ++pb; } } } template <typename T> void operatorN(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) { // This function has the following arguments: // the 2 interacting meshes // the storage Matrix for the result // the coefficient to be applied to each matrix element (depending on conductivities, ...) // the gauss order parameter (for adaptive integration) std::cout << "OPERATOR N ... 
(arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl; if (&m1==&m2) { auto NUpdate = [&](const Mesh& m,const auto& M) { ProgressBar pb(m1.vertices().size()); for (auto vit1=m.vertices().begin();vit1!=m.vertices().end();++vit1) { #pragma omp parallel for #if defined NO_OPENMP || defined OPENMP_ITERATOR for (auto vit2=vit1;vit2<m.vertices().end();++vit2) { #else for (int i2=0;i2<=vit1-m1.vertices().begin();++i2) { const auto vit2 = m1.vertices().begin()+i2; #endif mat((*vit1)->index(),(*vit2)->index()) += Details::operatorN(**vit1,**vit2,m,m,M)*coeff; } ++pb; } }; if (m1.current_barrier()) { // Precompute operator S divided by the product of triangles area. ProgressBar pb(m1.triangles().size()); SymMatrix matS(m1.triangles().size()); for (Triangles::const_iterator tit1=m1.triangles().begin();tit1!=m1.triangles().end();++tit1) { const analyticS analyS(*tit1); const unsigned ind1 = tit1->index()-m1.triangles().front().index(); #pragma omp parallel for #if defined NO_OPENMP || defined OPENMP_ITERATOR for (Triangles::const_iterator tit2=tit1;tit2<m1.triangles().end();++tit2) { #else for (int i2=tit1-m1.triangles().begin();i2<m1.triangles().size();++i2) { const Triangles::const_iterator tit2 = m1.triangles().begin()+i2; #endif const unsigned ind2 = tit2->index()-m2.triangles().front().index(); matS(ind1,ind2) = Details::operatorS(analyS,*tit2,gauss_order)/(tit1->area()*tit2->area()); } ++pb; } NUpdate(m1,matS); } else { NUpdate(m1,mat); } } else { auto NUpdate = [&](const Mesh& m1,const Mesh& m2,const auto& M) { ProgressBar pb(m1.vertices().size()); const VerticesRefs& v2 = m2.vertices(); for (const auto& vertex1 : m1.vertices()) { #pragma omp parallel for #if defined NO_OPENMP || defined OPENMP_RANGEFOR for (const auto& vertex2 : v2) { #elif defined OPENMP_ITERATOR for (auto vit2=v2.begin();vit2<v2.end();++vit2) { const Vertex* vertex2 = *vit2; #else for (int i2=0;i2<v2.size();++i2) { const Vertex* vertex2 = *(v2.begin()+i2); #endif 
mat(vertex1->index(),vertex2->index()) += Details::operatorN(*vertex1,*vertex2,m1,m2,M)*coeff; } ++pb; } }; if (m1.current_barrier() || m2.current_barrier()) { // Precompute operator S divided by the product of triangles area. Matrix matS(m1.triangles().size(),m2.triangles().size()); ProgressBar pb(m1.triangles().size()); unsigned i = 0; for (const auto& triangle1 : m1.triangles()) { const analyticS analyS(triangle1); const unsigned ind1 = triangle1.index()-m1.triangles().front().index(); const Triangles& m2_triangles = m2.triangles(); #pragma omp parallel for #if defined NO_OPENMP || defined OPENMP_RANGEFOR for (const auto& triangle2 : m2_triangles) { #elif defined OPENMP_ITERATOR for (Triangles::const_iterator tit2=m2_triangles.begin();tit2<m2_triangles.end();++tit2) { const Triangle& triangle2 = *tit2; #else for (int i2=0;i2<m2_triangles.size();++i2) { const Triangle& triangle2 = *(m2_triangles.begin()+i2); #endif const unsigned ind2 = triangle2.index()-m2_triangles.front().index(); matS(ind1,ind2) = Details::operatorS(analyS,triangle2,gauss_order)/(triangle1.area()*triangle2.area()); } ++pb; } NUpdate(m1,m2,matS); } else { NUpdate(m1,m2,mat); } } } template <typename T> void operatorS(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) { // This function has the following arguments: // the 2 interacting meshes // the storage Matrix for the result // the coefficient to be applied to each matrix element (depending on conductivities, ...) // the gauss order parameter (for adaptive integration) std::cout << "OPERATOR S ... 
(arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl; // The operator S is given by Sij=\Int G*PSI(I, i)*Psi(J, j) with // PSI(A, a) is a P0 test function on layer A and triangle a if (&m1==&m2) { ProgressBar pb(m1.triangles().size()); for (Triangles::const_iterator tit1=m1.triangles().begin(); tit1!=m1.triangles().end(); ++tit1,++pb) { const analyticS analyS(*tit1); #pragma omp parallel for #if defined OPENMP_ITERATOR for (Triangles::const_iterator tit2=tit1;tit2<m1.triangles().end();++tit2) { #else for (int i2=tit1-m1.triangles().begin();i2<m1.triangles().size();++i2) { const Triangles::const_iterator tit2 = m1.triangles().begin()+i2; #endif mat(tit1->index(),tit2->index()) = Details::operatorS(analyS,*tit2,gauss_order)*coeff; } } } else { // TODO check the symmetry of Details::operatorS. // if we invert tit1 with tit2: results in HeadMat differs at 4.e-5 which is too big. // using ADAPT_LHS with tolerance at 0.000005 (for Details::opS) drops this at 6.e-6. (but increase the computation time) ProgressBar pb(m1.triangles().size()); for (const auto& triangle1 : m1.triangles()) { const analyticS analyS(triangle1); const Triangles& m2_triangles = m2.triangles(); #pragma omp parallel for #if defined NO_OPENMP || defined OPENMP_RANGEFOR for (const auto& triangle2 : m2_triangles) { #elif defined OPENMP_ITERATOR for (Triangles::const_iterator tit2=m2_triangles.begin();tit2<m2_triangles.end();++tit2) { const Triangle& triangle2 = *tit2; #else for (int i2=0;i2<m2_triangles.size();++i2) { const Triangle& triangle2 = *(m2_triangles.begin()+i2); #endif mat(triangle1.index(),triangle2.index()) = Details::operatorS(analyS,triangle2,gauss_order)*coeff; } ++pb; } } } template <typename T> void operatorD(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) { // This function (OPTIMIZED VERSION) has the following arguments: // the 2 interacting meshes // the storage Matrix for the result // the coefficient to be appleid to each 
matrix element (depending on conductivities, ...) // the gauss order parameter (for adaptive integration) std::cout << "OPERATOR D... (arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl; Details::operatorD(m1,m2,mat,coeff,gauss_order); } template <typename T> void operatorDstar(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) { // This function (OPTIMIZED VERSION) has the following arguments: // the 2 interacting meshes // the storage Matrix for the result // the coefficient to be appleid to each matrix element (depending on conductivities, ...) // the gauss order parameter (for adaptive integration) std::cout << "OPERATOR D*... (arg : mesh " << m1.name() << " , mesh " << m2.name() << ')' << std::endl; Details::operatorD(m2,m1,mat,coeff,gauss_order); } template <typename T> void operatorP1P0(const Mesh& m,T& mat,const double& coeff) { // This time mat(i, j)+= ... the Matrix is incremented by the P1P0 operator std::cout << "OPERATOR P1P0... (arg : mesh " << m.name() << " )" << std::endl; for (const auto& triangle : m.triangles()) for (const auto& vertex : triangle) mat(triangle.index(),vertex->index()) += Details::operatorP1P0(triangle,*vertex)*coeff; } }
GB_unop__identity_bool_bool.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: (none) // op(A') function: GB_unop_tran__identity_bool_bool // C type: bool // A type: bool // cast: bool cij = aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ bool z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ bool aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ bool z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( bool *Cx, // Cx and Ax may be aliased const bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; bool z = aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } #endif 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_bool_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
multind.c
/* Copyright 2013-2015 The Regents of the University of California. * Copyright 2016-2020. Uecker Lab. University Medical Center Göttingen. * Copyright 2017. Intel Corporation. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2012-2020 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2019-2020 Sebastian Rosenzweig * 2013 Frank Ong <frankong@berkeley.edu> * 2017 Michael J. Anderson <michael.j.anderson@intel.com> * * Generic operations on multi-dimensional arrays. Most functions * come in two flavours: * * 1. A basic version which takes the number of dimensions, an array * of long integers specifing the size of each dimension, the pointers * to the data, and the size of each element and other required parameters. * The data is assumed to be stored in column-major format. * * 2. An extended version which takes an array of long integers which * specifies the strides for each argument. * * All functions should work on CPU and GPU and md_copy can be used * to copy between CPU and GPU. * */ #define _GNU_SOURCE #include <string.h> #include <assert.h> #include <stdbool.h> #ifdef _WIN32 #include <malloc.h> #else #include <alloca.h> #endif #include <strings.h> #include "misc/misc.h" #include "misc/types.h" #include "misc/debug.h" #include "misc/nested.h" #include "num/optimize.h" #ifdef USE_CUDA #include "num/gpuops.h" #endif #include "multind.h" /** * Generic functions which loops over all dimensions of a set of * multi-dimensional arrays and calls a given function for each position. 
 */
void md_nary(unsigned int C, unsigned int D, const long dim[D], const long* str[C], void* ptr[C], md_nary_fun_t fun)
{
    if (0 == D) {

        // base case: all indices fixed, invoke the callback once
        NESTED_CALL(fun, (ptr));
        return;
    }

    // recurse over the outermost dimension, advancing each of the C
    // pointers by its own stride for that dimension
    for (long i = 0; i < dim[D - 1]; i++) {

        void* moving_ptr[C];

        for (unsigned int j = 0; j < C; j++)
            moving_ptr[j] = ptr[j] + i * str[j][D - 1];

        md_nary(C, D - 1, dim, str, moving_ptr, fun);
    }
}

/**
 * Generic function which loops over all dimensions of a set of
 * multi-dimensional arrays and calls a given function for each position.
 * This function tries to parallelize over the dimensions indicated
 * with flags.
 */
void md_parallel_nary(unsigned int C, unsigned int D, const long dim[D], unsigned long flags, const long* str[C], void* ptr[C], md_nary_fun_t fun)
{
    // only parallelize dimensions that are actually non-trivial (> 1)
    flags = flags & md_nontriv_dims(D, dim);

    if (0 == flags) {

        md_nary(C, D, dim, str, ptr, fun);
        return;
    }

    // dimc: the remaining (serial) dimensions; parallel ones are set to 1
    long dimc[D];
    md_select_dims(D, ~flags, dimc, dim);

    // Collect all parallel dimensions
    int nparallel = 0;
    int parallel_b[D];
    long parallel_dim[D];

    long total_iterations = 1L;

    while (0 != flags) {

        // extract the lowest set bit
        int b = ffsl(flags & -flags) - 1;

        assert(MD_IS_SET(flags, b));

        flags = MD_CLEAR(flags, b);

        debug_printf(DP_DEBUG4, "Parallelize: %d\n", dim[b]);

        parallel_b[nparallel] = b;
        parallel_dim[nparallel] = dim[b];

        total_iterations *= parallel_dim[nparallel];

        nparallel++;
    }

    // flatten all parallel dimensions into one OpenMP loop
    #pragma omp parallel for
    for (long i = 0; i < total_iterations; i++) {

        // Recover place in parallel iteration space
        long iter_i[D];
        long ii = i;

        for (int p = nparallel - 1; p >= 0; p--) {

            iter_i[p] = ii % parallel_dim[p];
            ii /= parallel_dim[p];
        }

        void* moving_ptr[C];

        for (unsigned int j = 0; j < C; j++) {

            moving_ptr[j] = ptr[j];

            for(int p = 0; p < nparallel; p++)
                moving_ptr[j] += iter_i[p] * str[j][parallel_b[p]];
        }

        // the serial remainder is handled by the recursive engine
        md_nary(C, D, dimc, str, moving_ptr, fun);
    }
}

static void md_parallel_loop_r(unsigned int D, unsigned int N, const long dim[static N], unsigned int flags, const long pos[static N], md_loop_fun_t fun)
{
    if (0 == D) {

        NESTED_CALL(fun, (pos));
        return;
    }

    D--;

    // we need to make a copy because firstprivate needs to see
    // an array instead of a pointer
    long pos_copy[N];

    for (unsigned int i = 0; i < N; i++)
        pos_copy[i] = pos[i];

    // parallelize this level only if it is non-trivial and flagged
    #pragma omp parallel for firstprivate(pos_copy) if ((1 < dim[D]) && (flags & (1 << D)))
    for (int i = 0; i < dim[D]; i++) {

        pos_copy[D] = i;

        md_parallel_loop_r(D, N, dim, flags, pos_copy, fun);
    }
}

/**
 * Generic function which loops over all dimensions and calls a given
 * function passing the current indices as argument.
 *
 * Runs fun(position) for all position in dim
 *
 */
void md_parallel_loop(unsigned int D, const long dim[static D], unsigned long flags, md_loop_fun_t fun)
{
    long pos[D];
    md_parallel_loop_r(D, D, dim, flags, pos, fun);
}

static void md_loop_r(unsigned int D, const long dim[D], long pos[D], md_loop_fun_t fun)
{
    if (0 == D) {

        NESTED_CALL(fun, (pos));
        return;
    }

    D--;

    for (pos[D] = 0; pos[D] < dim[D]; pos[D]++)
        md_loop_r(D, dim, pos, fun);
}

/**
 * Generic function which loops over all dimensions and calls a given
 * function passing the current indices as argument.
 *
 * Runs fun( position ) for all position in dim
 *
 */
void md_loop(unsigned int D, const long dim[D], md_loop_fun_t fun)
{
    long pos[D];
    md_loop_r(D, dim, pos, fun);
}

/**
 * Computes the next position. Returns true until last index.
 */
bool md_next(unsigned int D, const long dims[D], unsigned long flags, long pos[D])
{
    if (0 == D--)
        return false;

    // advance lower dimensions first (column-major / odometer order)
    if (md_next(D, dims, flags, pos))
        return true;

    if (MD_IS_SET(flags, D)) {

        assert((0 <= pos[D]) && (pos[D] < dims[D]));

        if (++pos[D] < dims[D])
            return true;

        pos[D] = 0;
    }

    return false;
}

/**
 * Returns offset for position in a multidimensional array
 *
 * return pos[0]*strides[0] + ...
+ pos[D-1]*strides[D-1]
 *
 * @param D number of dimensions
 * @param strides strides array
 * @param position position array
 */
long md_calc_offset(unsigned int D, const long strides[D], const long position[D])
{
    long pos = 0;

    for (unsigned int i = 0; i < D; i++)
        pos += strides[i] * position[i];

    return pos;
}

// tail-recursive product helper for md_calc_size
static long md_calc_size_r(unsigned int D, const long dim[D], size_t size)
{
    if (0 == D)
        return size;

    return md_calc_size_r(D - 1, dim, size * dim[D - 1]);
}

/**
 * Returns the number of elements
 *
 * return dim[0]*dim[1]*...*dim[D-1]
 *
 * @param D number of dimensions
 * @param dim dimensions array
 */
long md_calc_size(unsigned int D, const long dim[D])
{
    return md_calc_size_r(D, dim, 1);
}

/**
 * Computes the number of smallest dimensions which are stored
 * contiguously, i.e. can be accessed as a block of memory.
 *
 */
unsigned int md_calc_blockdim(unsigned int D, const long dim[D], const long str[D], size_t size)
{
    long dist = size;
    unsigned int i = 0;

    for (i = 0; i < D; i++) {

        // a dimension is "contiguous" if its stride equals the running
        // block size, or if it is a singleton (stride irrelevant)
        if (!((str[i] == dist) || (dim[i] == 1)))
            break;

        dist *= dim[i];
    }

    return i;
}

/**
 * Copy dimensions specified by flags and set remaining dimensions to 1
 *
 * odims = [ 1 idims[1] idims[2] 1 1 idims[5] ]
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to copy
 * @param odims output dimensions
 * @param idims input dimensions
 */
void md_select_dims(unsigned int D, unsigned long flags, long odims[D], const long idims[D])
{
    md_copy_dims(D, odims, idims);

    for (unsigned int i = 0; i < D; i++)
        if (!MD_IS_SET(flags, i))
            odims[i] = 1;
}

/**
 * Copy dimensions
 *
 * odims[i] = idims[i]
 */
void md_copy_dims(unsigned int D, long odims[D], const long idims[D])
{
    memcpy(odims, idims, D * sizeof(long));
}

/**
 * Copy strides
 *
 * ostrs[i] = istrs[i]
 */
void md_copy_strides(unsigned int D, long ostrs[D], const long istrs[D])
{
    memcpy(ostrs, istrs, D * sizeof(long));
}

/**
 * Set all dimensions to value
 *
 * dims[i] = val
 */
void md_set_dims(unsigned int D, long dims[D], long val)
{
    for (unsigned int i = 0; i < D; i++)
        dims[i] = val;
}

/**
 * returns whether or not @param pos is a valid index of an array of dimension @param dims
 */
bool md_is_index(unsigned int D, const long pos[D], const long dims[D])
{
    if (D == 0)
        return true;

    return ((pos[0] >= 0) && (pos[0] < dims[0]) && md_is_index(D - 1, pos + 1, dims + 1));
}

/**
 * return whether some other dimensions are >1
 */
bool md_check_dimensions(unsigned int N, const long dims[N], unsigned int flags)
{
    long d[N];
    md_select_dims(N, ~flags, d, dims);

    return (1 != md_calc_size(N, d));
}

/**
 * Check if dimensions at 'flags' position are equal
 */
bool md_check_equal_dims(unsigned int N, const long dims1[N], const long dims2[N], unsigned int flags)
{
    // equal iff each bounds the other at the flagged positions
    return (    md_check_bounds(N, flags, dims1, dims2)
             && md_check_bounds(N, flags, dims2, dims1));
}

/*
 * compute non-trivial (> 1) dims
 */
unsigned long md_nontriv_dims(unsigned int D, const long dims[D])
{
    unsigned long flags = 0;

    for (unsigned int i = 0; i < D; i++)
        if (dims[i] > 1)
            flags = MD_SET(flags, i);

    return flags;
}

/*
 * compute non-trivial (!= 0) strides
 */
unsigned long md_nontriv_strides(unsigned int D, const long strs[D])
{
    unsigned long flags = 0;

    for (unsigned int i = 0; i < D; i++)
        if (strs[i] != 0)
            flags = MD_SET(flags, i);

    return flags;
}

/**
 * Set all dimensions to one
 *
 * dims[i] = 1
 */
void md_singleton_dims(unsigned int D, long dims[D])
{
    for (unsigned int i = 0; i < D; i++)
        dims[i] = 1;
}

/**
 * Set all strides to zero
 *
 * strs[i] = 0
 */
void md_singleton_strides(unsigned int D, long strs[D])
{
    for (unsigned int i = 0; i < D; i++)
        strs[i] = 0;
}

/**
 * Check dimensions for compatibility. Dimensions must be equal or
 * where indicated by a set bit in flags one must be equal to one
 * in at least one of the arguments.
 */
bool md_check_compat(unsigned int D, unsigned long flags, const long dim1[D], const long dim2[D])
{
    if (0 == D)
        return true;

    D--;

    if ((dim1[D] == dim2[D]) || (MD_IS_SET(flags, D) && ((1 == dim1[D]) || (1 == dim2[D]))))
        return md_check_compat(D, flags, dim1, dim2);

    return false;
}

/**
 * Merge two compatible dimension vectors: singletons give way to the
 * other argument's extent (broadcast-style merge).
 */
void md_merge_dims(unsigned int N, long out_dims[N], const long dims1[N], const long dims2[N])
{
    assert(md_check_compat(N, ~0UL, dims1, dims2));

    for (unsigned int i = 0; i < N; i++)
        out_dims[i] = (1 == dims1[i]) ? dims2[i] : dims1[i];
}

/**
 * dim1 must be bounded by dim2 where a bit is set
 */
bool md_check_bounds(unsigned int D, unsigned long flags, const long dim1[D], const long dim2[D])
{
    if (0 == D--)
        return true;

    if (!MD_IS_SET(flags, D) || (dim1[D] <= dim2[D]))
        return md_check_bounds(D, flags, dim1, dim2);

    return false;
}

/**
 * Set the output's flagged dimensions to the minimum of the two input dimensions
 *
 * odims = [ MIN(idims1[0],idims2[0] ... MIN(idims1[D-1],idims2[D-1]) ]
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to minimize
 * @param odims output dimensions
 * @param idims1 input 1 dimensions
 * @param idims2 input 2 dimensions
 */
void md_min_dims(unsigned int D, unsigned long flags, long odims[D], const long idims1[D], const long idims2[D])
{
    // NOTE(review): unflagged entries of odims are left untouched
    for (unsigned int i = 0; i < D; i++)
        if (MD_IS_SET(flags, i))
            odims[i] = MIN(idims1[i], idims2[i]);
}

/**
 * Set the output's flagged dimensions to the maximum of the two input dimensions
 *
 * odims = [ MAX(idims1[0],idims2[0] ... MAX(idims1[D-1],idims2[D-1]) ]
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to maximize
 * @param odims output dimensions
 * @param idims1 input 1 dimensions
 * @param idims2 input 2 dimensions
 */
void md_max_dims(unsigned int D, unsigned long flags, long odims[D], const long idims1[D], const long idims2[D])
{
    // NOTE(review): unflagged entries of odims are left untouched
    for (unsigned int i = 0; i < D; i++)
        if (MD_IS_SET(flags, i))
            odims[i] = MAX(idims1[i], idims2[i]);
}

/**
 * Zero out array (with strides)
 *
 * ptr[i] = 0
 */
void md_clear2(unsigned int D, const long dim[D], const long str[D], void* ptr, size_t size)
{
    const long (*nstr[1])[D] = { (const long (*)[D])str };
#ifdef USE_CUDA
    bool use_gpu = cuda_ondevice(ptr);
#endif
    unsigned long flags = 0;

    // zero-stride dimensions alias the same memory; collapse them to 1
    // so each location is cleared only once
    for (unsigned int i = 0; i < D; i++)
        if (0 == str[i])
            flags |= MD_BIT(i);

    long dim2[D];
    md_select_dims(D, ~flags, dim2, dim);

    NESTED(void, nary_clear, (struct nary_opt_data_s* opt_data, void* ptr[]))
    {
        size_t size2 = size * opt_data->size;

#ifdef USE_CUDA
        if (use_gpu) {

            cuda_clear(size2, ptr[0]);
            return;
        }
#endif
        memset(ptr[0], 0, size2);
    };

    optimized_nop(1, MD_BIT(0), D, dim2, nstr, (void*[1]){ ptr }, (size_t[1]){ size }, nary_clear);
}

/**
 * Calculate strides in column-major format
 * (smallest index is sequential)
 *
 * @param D number of dimensions
 * @param str output array of calculated strides
 * @param dim array of dimensions
 * @param size of a single element
 */
long* md_calc_strides(unsigned int D, long str[D], const long dim[D], size_t size)
{
    long old = size;

    for (unsigned int i = 0; i < D; i++) {

        // singleton dimensions get stride 0
        str[i] = (1 == dim[i]) ?
0 : old;

        old *= dim[i];
    }

    return str;
}

/**
 * Zero out array (without strides)
 *
 * ptr[i] = 0
 *
 * @param D number of dimensions
 * @param dim dimensions array
 * @param ptr pointer to data to clear
 * @param size sizeof()
 */
void md_clear(unsigned int D, const long dim[D], void* ptr, size_t size)
{
    md_clear2(D, dim, MD_STRIDES(D, dim, size), ptr, size);
}

/**
 * Copy array (with strides)
 *
 * optr[i] = iptr[i]
 */
void md_copy2(unsigned int D, const long dim[D], const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
#if 0
    // this is for a fun comparison between our copy engine and FFTW

    extern void fft2(unsigned int D, const long dim[D], unsigned int flags, const long ostr[D], void* optr, const long istr[D], const void* iptr);

    if (sizeof(complex float) == size)
        fft2(D, dim, 0, ostr, optr, istr, iptr);
#endif

#ifdef USE_CUDA
    bool use_gpu = cuda_ondevice(optr) || cuda_ondevice(iptr);

#if 1
    // working copies of dims/strides that can be reordered/merged
    long tostr[D];
    long tistr[D];
    long tdims[D];

    md_copy_strides(D, tostr, ostr);
    md_copy_strides(D, tistr, istr);
    md_copy_dims(D, tdims, dim);

    long (*nstr2[2])[D] = { &tostr, &tistr };
    int ND = optimize_dims_gpu(2, D, tdims, nstr2);

    assert(ND <= (int)D);

#if 1
    // permute dims with 0 input strides or negative in/output strides to the end
    // these might be permuted to the inner dimensions by optimize_dims and break the strided copy

    unsigned int perm[ND];

    for (int i = 0, j = 0; i < ND; i++) {

        if (   (0 >= (*nstr2[1])[i])
            || (0 >= (*nstr2[0])[i])) {

            perm[ND - 1 -j] = i;
            j += 1;

        } else {

            perm[i - j] = i;
        }
    }

    long tmp[ND];

    md_permute_dims(ND, perm, tmp, tdims);
    md_copy_dims(ND, tdims, tmp);

    md_permute_dims(ND, perm, tmp, tostr);
    md_copy_dims(ND, tostr, tmp);

    md_permute_dims(ND, perm, tmp, tistr);
    md_copy_dims(ND, tistr, tmp);
#endif

#if 1
    //fill like copies
    // a dimension read with stride 0 but written with nonzero stride
    // replicates one source block; handle it by doubling copies below
    unsigned long fill_flags = md_nontriv_dims(D, tdims)
                    & ~md_nontriv_strides(D, tistr)
                    & md_nontriv_strides(D, tostr);

    if (use_gpu && (0 != fill_flags)) {

        int idx = md_min_idx(fill_flags);

        long tdims2[ND];
        long pos[ND];

        md_select_dims(ND, ~MD_BIT(idx), tdims2, tdims);
        md_singleton_strides(ND, pos);

        // copy the source once, then replicate the already-written part
        // with doubling steps until the fill dimension is covered
        md_copy2(ND, tdims2, tostr, optr, tistr, iptr, size);

        pos[idx] = 1;

        while (pos[idx] < tdims[idx]) {

            tdims2[idx] = MIN(pos[idx], tdims[idx] - pos[idx]);

            md_copy2(ND, tdims2, tostr, optr + md_calc_offset(ND, tostr, pos), tostr, optr, size);

            pos[idx] += tdims2[idx];
        }

        return;
    }
#endif

    size_t sizes[2] = { size, size };
    int skip = min_blockdim(2, ND, tdims, nstr2, sizes);

    debug_printf(DP_DEBUG4, "md_copy_2 skip=%d\n", skip);
    debug_print_dims(DP_DEBUG4, ND, tdims);
    debug_print_dims(DP_DEBUG4, ND, (*nstr2[0]));
    debug_print_dims(DP_DEBUG4, ND, (*nstr2[1]));

    if ( use_gpu && (ND - skip > 0)) {

        // use a strided 2D CUDA copy for the innermost non-contiguous level
        assert(skip < ND);

        long ostr2 = (*nstr2[0])[skip];
        long istr2 = (*nstr2[1])[skip];

        if (!( (ostr2 > 0) && (istr2 > 0)))
            goto out;

        void* nptr[2] = { optr, (void*)iptr };

        long sizes[2] = { md_calc_size(skip, tdims) * size, tdims[skip] };

        skip++;

        const long* nstr[2] = { *nstr2[0] + skip, *nstr2[1] + skip };

        long* sizesp = sizes; // because of clang
        void** nptrp = nptr;

        NESTED(void, nary_strided_copy, (void* ptr[]))
        {
            debug_printf(DP_DEBUG4, "CUDA 2D copy %ld %ld %ld %ld %ld %ld\n", sizesp[0], sizesp[1], ostr2, istr2, nptrp[0], nptrp[1]);

            cuda_memcpy_strided(sizesp, ostr2, ptr[0], istr2, ptr[1]);
        };

        md_nary(2, ND - skip, tdims + skip, nstr, nptr, nary_strided_copy);
        return;
    }
out:    ;
#endif
#endif

    // generic (CPU or fallback) path: let the optimizer merge dimensions
    // and do block-wise memcpy
    const long (*nstr[2])[D] = { (const long (*)[D])ostr, (const long (*)[D])istr };

    NESTED(void, nary_copy, (struct nary_opt_data_s* opt_data, void* ptr[]))
    {
        size_t size2 = size * opt_data->size;

#ifdef USE_CUDA
        if (use_gpu) {

            cuda_memcpy(size2, ptr[0], ptr[1]);
            return;
        }
#endif
        memcpy(ptr[0], ptr[1], size2);
    };

    optimized_nop(2, MD_BIT(0), D, dim, nstr, (void*[2]){ optr, (void*)iptr }, (size_t[2]){ size, size }, nary_copy);
}

/**
 * Copy array (without strides)
 *
 * optr[i] = iptr[i]
 */
void md_copy(unsigned int D, const long dim[D], void* optr, const void* iptr, size_t size)
{
    long str[D];
    md_calc_strides(D, str,
dim, size);

    md_copy2(D, dim, str, optr, str, iptr, size);
}

#ifdef USE_CUDA
// copied from flpmath.c
// move a single element to the GPU so strided GPU copies can read it
static void* gpu_constant(const void* vp, size_t size)
{
    return md_gpu_move(1, (long[1]){ 1 }, vp, size);
}
#endif

/**
 * Fill array with value pointed by pointer (with strides)
 *
 * ptr[i] = iptr[0]
 */
void md_fill2(unsigned int D, const long dim[D], const long str[D], void* ptr, const void* iptr, size_t size)
{
#ifdef USE_CUDA
    // if destination is on the GPU but the fill value is not,
    // stage the value on the GPU first
    if (cuda_ondevice(ptr) && (!cuda_ondevice(iptr))) {

        void* giptr = gpu_constant(iptr, size);

        md_fill2(D, dim, str, ptr, giptr, size);

        md_free(giptr);
        return;
    }
#endif
    // a copy with all-zero input strides broadcasts the single value
    long istr[D];
    md_singleton_strides(D, istr);

    md_copy2(D, dim, str, ptr, istr, iptr, size);
}

/**
 * Fill array with value pointed by pointer (without strides)
 *
 * ptr[i] = iptr[0]
 */
void md_fill(unsigned int D, const long dim[D], void* ptr, const void* iptr, size_t size)
{
    md_fill2(D, dim, MD_STRIDES(D, dim, size), ptr, iptr, size);
}

/**
 * Swap values between a number of arrays (with strides)
 */
void md_circular_swap2(unsigned int M, unsigned int D, const long dims[D], const long* strs[M], void* ptr[M], size_t size)
{
    size_t sizes[M];

    for (unsigned int i = 0; i < M; i++)
        sizes[i] = size;

    const long (*nstrs[M])[D];

    for (unsigned int i = 0; i < M; i++)
        nstrs[i] = (const long (*)[D])strs[i];

    NESTED(void, nary_swap, (struct nary_opt_data_s* opt_data, void* ptr[]))
    {
        size_t size2 = size * opt_data->size;

        // small blocks use the stack, larger ones the heap
        char* tmp = (size2 < 32) ? alloca(size2) : xmalloc(size2);

#ifdef USE_CUDA
        assert(!cuda_ondevice(ptr[0]));
        assert(!cuda_ondevice(ptr[1]));
#endif
        // rotate: ptr[0] <- ptr[1] <- ... <- ptr[M-1] <- old ptr[0]
        memcpy(tmp, ptr[0], size2);

        for (unsigned int i = 0; i < M - 1; i++)
            memcpy(ptr[i], ptr[i + 1], size2);

        memcpy(ptr[M - 1], tmp, size2);

        if (size2 >= 32)
            xfree(tmp);
    };

    optimized_nop(M, (1 << M) - 1, D, dims, nstrs, ptr, sizes, nary_swap);
}

/**
 * Swap values between a number of arrays
 */
void md_circular_swap(unsigned M, unsigned int D, const long dims[D], void* ptr[M], size_t size)
{
    long strs[M][D];

    md_calc_strides(D, strs[0], dims, size);

    const long* strp[M];

    strp[0] = strs[0];

    for (unsigned int i = 1; i < M; i++) {

        md_copy_strides(D, strs[i], strs[0]);
        strp[i] = strs[i];
    }

    md_circular_swap2(M, D, dims, strp, ptr, size);
}

/**
 * Swap values between two arrays (with strides)
 *
 * iptr[i] = optr[i] and optr[i] = iptr[i]
 */
void md_swap2(unsigned int D, const long dim[D], const long ostr[D], void* optr, const long istr[D], void* iptr, size_t size)
{
    md_circular_swap2(2, D, dim, (const long*[2]){ ostr, istr }, (void*[2]){ optr, iptr }, size);
}

/**
 * Swap values between two arrays (without strides)
 *
 * iptr[i] = optr[i] and optr[i] = iptr[i]
 */
void md_swap(unsigned int D, const long dim[D], void* optr, void* iptr, size_t size)
{
    long str[D];
    md_calc_strides(D, str, dim, size);

    md_swap2(D, dim, str, optr, str, iptr, size);
}

/**
 * Move a block from an array to another array (with strides)
 *
 */
void md_move_block2(unsigned int D, const long dim[D], const long opos[D], const long odim[D], const long ostr[D], void* optr, const long ipos[D], const long idim[D], const long istr[D], const void* iptr, size_t size)
{
    // the block must fit into both arrays at the given positions
    for (unsigned int i = 0; i < D; i++) {

        assert(dim[i] <= odim[i]);
        assert(dim[i] <= idim[i]);
        assert((0 <= opos[i]) && (opos[i] <= odim[i] - dim[i]));
        assert((0 <= ipos[i]) && (ipos[i] <= idim[i] - dim[i]));
    }

    long ioff = md_calc_offset(D, istr, ipos);
    long ooff = md_calc_offset(D, ostr, opos);

    md_copy2(D, dim, ostr, optr + ooff, istr, iptr +
ioff, size);
}

/**
 * Move a block from an array to another array (without strides)
 *
 */
void md_move_block(unsigned int D, const long dim[D], const long opos[D], const long odim[D], void* optr, const long ipos[D], const long idim[D], const void* iptr, size_t size)
{
    md_move_block2(D, dim, opos, odim, MD_STRIDES(D, odim, size), optr, ipos, idim, MD_STRIDES(D, idim, size), iptr, size);
}

/**
 * Copy a block from an array to another array (with strides)
 *
 * Block dimensions are min(idim , odim)
 *
 * if idim[d] > odim[d], then optr[i] = iptr[pos + i] for 0 <= i < odim[d]
 *
 * if idim[d] < odim[d], then optr[pos + i] = iptr[i] for 0 <= i < idim[d]
 *
 */
void md_copy_block2(unsigned int D, const long pos[D], const long odim[D], const long ostr[D], void* optr, const long idim[D], const long istr[D], const void* iptr, size_t size)
{
    long dim[D];
    long ipos[D];
    long opos[D];

    for (unsigned int i = 0; i < D; i++) {

        // pos is only meaningful where the extents differ
        assert((idim[i] != odim[i]) || (0 == pos[i]));

        dim[i] = MIN(odim[i], idim[i]);

        ipos[i] = 0;
        opos[i] = 0;

        // the offset applies to whichever side is larger
        if (idim[i] != dim[i])
            ipos[i] = pos[i];

        if (odim[i] != dim[i])
            opos[i] = pos[i];
    }

    md_move_block2(D, dim, opos, odim, ostr, optr, ipos, idim, istr, iptr, size);
}

/**
 * Copy a block from an array to another array (without strides)
 *
 * Block dimensions are min(idim , odim)
 *
 * if idim[d] > odim[d], then optr[i] = iptr[pos + i] for 0 <= i < odim[d]
 *
 * if idim[d] < odim[d], then optr[pos + i] = iptr[i] for 0 <= i < idim[d]
 *
 */
void md_copy_block(unsigned int D, const long pos[D], const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
    md_copy_block2(D, pos, odim, MD_STRIDES(D, odim, size), optr, idim, MD_STRIDES(D, idim, size), iptr, size);
}

/**
 * Resize an array by zero-padding or by truncation at the end.
 *
 * optr = [iptr 0 0 0 0]
 *
 */
void md_resize(unsigned int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
    long pos[D];
    memset(pos, 0, D * sizeof(long));

    md_clear(D, odim, optr, size);
    md_copy_block(D, pos, odim, optr, idim, iptr, size);
}

/**
 * Pad an array by val at the end.
 *
 * optr = [iptr val val val val]
 *
 */
void md_pad(unsigned int D, const void* val, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
    long pos[D];
    memset(pos, 0, D * sizeof(long));

    md_fill(D, odim, optr, val, size);
    md_copy_block(D, pos, odim, optr, idim, iptr, size);
}

/**
 * Resize an array by zero-padding or by truncation at both ends symmetrically.
 *
 * optr = [0 0 iptr 0 0]
 *
 */
void md_resize_center(unsigned int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
    // the definition of the center position corresponds
    // to the one used in the FFT.
    long pos[D];

    for (unsigned int i = 0; i < D; i++)
        pos[i] = labs((odim[i] / 2) - (idim[i] / 2));

    md_clear(D, odim, optr, size);
    md_copy_block(D, pos, odim, optr, idim, iptr, size);
}

/**
 * Pad an array on both ends by val.
*
 * optr = [val val iptr val val]
 *
 */
void md_pad_center(unsigned int D, const void* val, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
    // center convention matches md_resize_center (FFT-style center)
    long pos[D];

    for (unsigned int i = 0; i < D; i++)
        pos[i] = labs((odim[i] / 2) - (idim[i] / 2));

    md_fill(D, odim, optr, val, size);
    md_copy_block(D, pos, odim, optr, idim, iptr, size);
}

/**
 * Extract slice from array specified by flags (with strides)
 *
 * optr = iptr(pos[0], :, pos[2], :, :)
 *
 */
void md_slice2(unsigned int D, unsigned long flags, const long pos[D], const long dim[D], const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
    long odim[D];
    md_select_dims(D, ~flags, odim, dim);

    md_copy_block2(D, pos, odim, ostr, optr, dim, istr, iptr, size);
}

/**
 * Extract slice from array specified by flags (without strides)
 *
 * optr = iptr(pos[0], :, pos[2], :, :)
 *
 */
void md_slice(unsigned int D, unsigned long flags, const long pos[D], const long dim[D], void* optr, const void* iptr, size_t size)
{
    long odim[D];
    md_select_dims(D, ~flags, odim, dim);

    md_slice2(D, flags, pos, dim, MD_STRIDES(D, odim, size), optr, MD_STRIDES(D, dim, size), iptr, size);
}

/**
 * Permute array (with strides)
 *
 * optr[order[i]] = iptr[i]
 *
 */
void md_permute2(unsigned int D, const unsigned int order[D], const long odims[D], const long ostr[D], void* optr, const long idims[D], const long istr[D], const void* iptr, size_t size)
{
    unsigned int flags = 0;
    long ostr2[D];

    for (unsigned int i = 0; i < D; i++) {

        assert(order[i] < D);
        assert(odims[i] == idims[order[i]]);

        flags = MD_SET(flags, order[i]);

        // permute the output strides into input order, then do a plain copy
        ostr2[order[i]] = ostr[i];
    }

    // order must be a permutation (every dimension used exactly once)
    assert(MD_BIT(D) == flags + 1);

    md_copy2(D, idims, ostr2, optr, istr, iptr, size);
}

/**
 * Permute array (without strides)
 *
 * optr[order[i]] = iptr[i]
 *
 */
void md_permute(unsigned int D, const unsigned int order[D], const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
    md_permute2(D, order, odims, MD_STRIDES(D, odims, size), optr, idims, MD_STRIDES(D, idims, size), iptr, size);
}

/**
 * Permute dimensions
 *
 *
 */
void md_permute_dims(unsigned int D, const unsigned int order[D], long odims[D], const long idims[D])
{
    for (unsigned int i = 0; i < D; i++)
        odims[i] = idims[order[i]];
}

// build the permutation that exchanges dim1 and dim2 (identity elsewhere)
static void md_transpose_order(unsigned int D, unsigned int order[D], unsigned int dim1, unsigned int dim2)
{
    assert(dim1 < D);
    assert(dim2 < D);

    for (unsigned int i = 0; i < D; i++)
        order[i] = i;

    order[dim1] = dim2;
    order[dim2] = dim1;
}

/**
 * Transpose dimensions
 *
 *
 */
void md_transpose_dims(unsigned int D, unsigned int dim1, unsigned int dim2, long odims[D], const long idims[D])
{
    unsigned int order[D];
    md_transpose_order(D, order, dim1, dim2);

    md_permute_dims(D, order, odims, idims);
}

/**
 * Transpose array (with strides)
 *
 * optr[dim2] = iptr[dim1]
 *
 * optr[dim1] = iptr[dim2]
 *
 */
void md_transpose2(unsigned int D, unsigned int dim1, unsigned int dim2, const long odims[D], const long ostr[D], void* optr, const long idims[D], const long istr[D], const void* iptr, size_t size)
{
    for (unsigned int i = 0; i < D; i++)
        if ((i != dim1) && (i != dim2))
            assert(odims[i] == idims[i]);

    assert(odims[dim1] == idims[dim2]);
    assert(odims[dim2] == idims[dim1]);

    unsigned int order[D];
    md_transpose_order(D, order, dim1, dim2);

    md_permute2(D, order, odims, ostr, optr, idims, istr, iptr, size);
}

/**
 * Transpose array (without strides)
 *
 * optr[dim2] = iptr[dim1]
 *
 * optr[dim1] = iptr[dim2]
 *
 */
void md_transpose(unsigned int D, unsigned int dim1, unsigned int dim2, const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
    md_transpose2(D, dim1, dim2, odims, MD_STRIDES(D, odims, size), optr, idims, MD_STRIDES(D, idims, size), iptr, size);
}

static void md_flip_inpl2(unsigned int D, const long dims[D], unsigned long flags, const long str[D], void* ptr, size_t size);

/**
 * Swap input and output while flipping selected dimensions
 * at the same time.
 */
void md_swap_flip2(unsigned int D, const long dims[D], unsigned long flags, const long ostr[D], void* optr, const long istr[D], void* iptr, size_t size)
{
#if 1
    // find the highest flagged non-singleton dimension
    int i;

    for (i = D - 1; i >= 0; i--)
        if ((1 != dims[i]) && MD_IS_SET(flags, i))
            break;

    if (-1 == i) {

        // nothing to flip: a plain swap suffices
        md_swap2(D, dims, ostr, optr, istr, iptr, size);
        return;
    }

    assert(1 < dims[i]);
    assert(ostr[i] != 0);
    assert(istr[i] != 0);

    // divide-and-conquer: swap the first half of one array with the
    // mirrored second half of the other, recursing on dimension i
    long dims2[D];
    md_copy_dims(D, dims2, dims);
    dims2[i] = dims[i] / 2;

    long off = (dims[i] + 1) / 2;

    assert(dims2[i] + off == dims[i]);

    md_swap_flip2(D, dims2, flags, ostr, optr, istr, iptr + off * istr[i], size);
    md_swap_flip2(D, dims2, flags, ostr, optr + off * ostr[i], istr, iptr, size);

    // odd, swap center plane
    // (we should split in three similar sized chunks instead)

    dims2[i] = 1;

    if (1 == dims[i] % 2)
        md_swap_flip2(D, dims2, flags, ostr, optr + (off - 1) * ostr[i], istr, iptr + (off - 1) * istr[i], size);
#else
    // simpler, but more swaps
    md_swap2(D, dims, ostr, optr, istr, iptr, size);
    md_flip_inpl2(D, dims, flags, ostr, optr, size);
    md_flip_inpl2(D, dims, flags, istr, iptr, size);
#endif
}

/**
 * Swap input and output while flipping selected dimensions
 * at the same time.
 */
void md_swap_flip(unsigned int D, const long dims[D], unsigned long flags, void* optr, void* iptr, size_t size)
{
    long strs[D];
    md_calc_strides(D, strs, dims, size);

    md_swap_flip2(D, dims, flags, strs, optr, strs, iptr, size);
}

// in-place flip, implemented by swap-flipping the two halves of the array
static void md_flip_inpl2(unsigned int D, const long dims[D], unsigned long flags, const long str[D], void* ptr, size_t size)
{
    int i;

    for (i = D - 1; i >= 0; i--)
        if ((1 != dims[i]) && MD_IS_SET(flags, i))
            break;

    if (-1 == i)
        return;

    assert(1 < dims[i]);
    assert(str[i] != 0);

    long dims2[D];
    md_copy_dims(D, dims2, dims);
    dims2[i] = dims[i] / 2;

    long off = str[i] * (0 + (dims[i] + 1) / 2);

    md_swap_flip2(D, dims2, flags, str, ptr, str, ptr + off, size);
}

/**
 * Flip array (with strides)
 *
 * optr[dims[D] - 1 - i] = iptr[i]
 *
 */
void md_flip2(unsigned int D, const long dims[D], unsigned long flags, const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
    if (optr == iptr) {

        // aliased: fall back to the in-place algorithm
        assert(ostr == istr);

        md_flip_inpl2(D, dims, flags, ostr, optr, size);
        return;
    }

    // otherwise flip by copying with negated strides and a start offset
    long off = 0;
    long ostr2[D];

    for (unsigned int i = 0; i < D; i++) {

        ostr2[i] = ostr[i];

        if (MD_IS_SET(flags, i)) {

            ostr2[i] = -ostr[i];
            off += (dims[i] - 1) * ostr[i];
        }
    }

    md_copy2(D, dims, ostr2, optr + off, istr, iptr, size);
}

/**
 * Flip array (without strides)
 *
 * optr[dims[D] - 1 - i] = iptr[i]
 *
 */
void md_flip(unsigned int D, const long dims[D], unsigned long flags, void* optr, const void* iptr, size_t size)
{
    long str[D];
    md_calc_strides(D, str, dims, size);

    md_flip2(D, dims, flags, str, optr, str, iptr, size);
}

/**
 * Reshape array (with strides)
 *
 * Only flagged dims may flow
 */
void md_reshape2(unsigned int D, unsigned long flags, const long odims[D], const long ostrs[D], void* optr, const long idims[D], const long istrs[D], const void* iptr, size_t size)
{
    assert(md_calc_size(D, odims) == md_calc_size(D, idims));
    assert(md_check_equal_dims(D, odims, idims, ~flags));

    // build a permutation moving all flagged (reshaped) dims to the front
    unsigned int order[D];
    unsigned int j = 0;

    for (unsigned int i = 0; i < D; i++)
        if
(MD_IS_SET(flags, i)) order[j++] = i; for (unsigned int i = 0; i < D; i++) if (!MD_IS_SET(flags, i)) order[j++] = i; assert(D == j); unsigned int iorder[D]; for (unsigned int i = 0; i < D; i++) iorder[order[i]] = i; long dims2[D]; long strs2[D]; // FIXME: we could avoid the buffer in some cases void* buf = md_alloc_sameplace(D, odims, size, optr); md_permute_dims(D, order, dims2, idims); md_calc_strides(D, strs2, dims2, size); md_permute2(D, order, dims2, strs2, buf, idims, istrs, iptr, size); md_permute_dims(D, order, dims2, odims); md_calc_strides(D, strs2, dims2, size); md_permute2(D, iorder, odims, ostrs, optr, dims2, strs2, buf, size); md_free(buf); } /** * Reshape array (without strides) * * Only flagged dims may flow */ void md_reshape(unsigned int D, unsigned long flags, const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size) { assert(md_calc_size(D, odims) == md_calc_size(D, idims)); assert(md_check_equal_dims(D, odims, idims, ~flags)); long ostrs[D]; md_calc_strides(D, ostrs, odims, size); long istrs[D]; md_calc_strides(D, istrs, idims, size); if (md_check_equal_dims(D, ostrs, istrs, ~flags)) { // strides consistent! 
md_copy(D, odims, optr, iptr, size); } else { md_reshape2(D, flags, odims, ostrs, optr, idims, istrs, iptr, size); } } bool md_compare2(unsigned int D, const long dims[D], const long str1[D], const void* src1, const long str2[D], const void* src2, size_t size) { __block bool eq = true; const long (*nstr[2])[D] = { (const long (*)[D])str1, (const long (*)[D])str2 }; NESTED(void, nary_cmp, (struct nary_opt_data_s* opt_data, void* ptrs[])) { size_t size2 = size * opt_data->size; bool eq2 = (0 == memcmp(ptrs[0], ptrs[1], size2)); #pragma omp atomic eq &= eq2; }; optimized_nop(2, 0u, D, dims, nstr, (void*[2]){ (void*)src1, (void*)src2 }, (size_t[2]){ size, size }, nary_cmp); return eq; } bool md_compare(unsigned int D, const long dims[D], const void* src1, const void* src2, size_t size) { long str[D]; md_calc_strides(D, str, dims, size); return md_compare2(D, dims, str, src1, str, src2, size); } static void md_septrafo_r(unsigned int D, unsigned int R, long dimensions[D], unsigned long flags, const long strides[D], void* ptr, md_trafo_fun_t fun) { if (0 == R--) return; md_septrafo_r(D, R, dimensions, flags, strides, ptr, fun); if (MD_IS_SET(flags, R)) { void* nptr[1] = { ptr }; const long* nstrides[1] = { strides }; long dimsR = dimensions[R]; long strsR = strides[R]; // because of clang dimensions[R] = 1; // we made a copy in md_septrafo2 NESTED(void, nary_septrafo, (void* ptr[])) { fun(dimsR, strsR, ptr[0]); }; //md_nary_parallel(1, D, dimensions, nstrides, nptr, &data, nary_septrafo); md_nary(1, D, dimensions, nstrides, nptr, nary_septrafo); dimensions[R] = dimsR; } } /** * Apply a separable transformation along selected dimensions. * */ void md_septrafo2(unsigned int D, const long dimensions[D], unsigned long flags, const long strides[D], void* ptr, md_trafo_fun_t fun) { long dimcopy[D]; md_copy_dims(D, dimcopy, dimensions); md_septrafo_r(D, D, dimcopy, flags, strides, ptr, fun); } /** * Apply a separable transformation along selected dimensions. 
* */ void md_septrafo(unsigned int D, const long dims[D], unsigned long flags, void* ptr, size_t size, md_trafo_fun_t fun) { md_septrafo2(D, dims, flags, MD_STRIDES(D, dims, size), ptr, fun); } /** * Copy diagonals from array specified by flags (with strides) * * dst(i, i, :, i, :) = src(i, i, :, i, :) * */ void md_copy_diag2(unsigned int D, const long dims[D], unsigned long flags, const long str1[D], void* dst, const long str2[D], const void* src, size_t size) { long stride1 = 0; long stride2 = 0; long count = -1; for (unsigned int i = 0; i < D; i++) { if (MD_IS_SET(flags, i)) { if (count < 0) count = dims[i]; assert(dims[i] == count); stride1 += str1[i]; stride2 += str2[i]; } } long xdims[D]; md_select_dims(D, ~flags, xdims, dims); for (long i = 0; i < count; i++) md_copy2(D, xdims, str1, dst + i * stride1, str2, src + i * stride2, size); } /** * Copy diagonals from array specified by flags (without strides) * * dst(i ,i ,: ,i , :) = src(i ,i ,: ,i ,:) * */ void md_copy_diag(unsigned int D, const long dims[D], unsigned long flags, void* dst, const void* src, size_t size) { long str[D]; md_calc_strides(D, str, dims, size); md_copy_diag2(D, dims, flags, str, dst, str, src, size); } /** * Fill diagonals specified by flags with value (without strides) * * dst(i, i, :, i, :) = src[0] * */ void md_fill_diag(unsigned int D, const long dims[D], unsigned long flags, void* dst, const void* src, size_t size) { long str2[D]; md_singleton_strides(D, str2); md_copy_diag2(D, dims, flags, MD_STRIDES(D, dims, size), dst, str2, src, size); } static void md_circ_shift_inpl2(unsigned int D, const long dims[D], const long center[D], const long strs[D], void* dst, size_t size) { #if 0 long dims1[D]; long dims2[D]; md_copy_dims(D, dims1, dims); md_copy_dims(D, dims2, dims); unsigned int i; for (i = 0; i < D; i++) { if (0 != center[i]) { dims1[i] = center[i]; dims2[i] = dims[i] - center[i]; break; } } if (i == D) return; long off = strs[i] * center[i]; // cool but slow, instead we want 
to have a chain of swaps md_flip2(D, dims, MD_BIT(i), strs, dst, strs, dst, size); md_flip2(D, dims1, MD_BIT(i), strs, dst, strs, dst, size); md_flip2(D, dims2, MD_BIT(i), strs, dst + off, strs, dst + off, size); // also not efficient, we want to merge the chain of swaps long center2[D]; md_copy_dims(D, center2, center); center2[i] = 0; md_circ_shift_inpl2(D, dims, center2, strs, dst, size); #else // use tmp for now unsigned int i; for (i = 0; i < D; i++) if (0 != center[i]) break; if (i == D) return; long tmp_strs[D]; md_calc_strides(D, tmp_strs, dims, size); void* tmp = md_alloc_sameplace(D, dims, size, dst); md_copy2(D, dims, tmp_strs, tmp, strs, dst, size); md_circ_shift2(D, dims, center, strs, dst, tmp_strs, tmp, size); md_free(tmp); #endif } /** * Circularly shift array (with strides) * * dst[mod(i + center)] = src[i] * */ void md_circ_shift2(unsigned int D, const long dimensions[D], const long center[D], const long str1[D], void* dst, const long str2[D], const void* src, size_t size) { long pos[D]; for (unsigned int i = 0; i < D; i++) { // FIXME: it would be better to calc modulo pos[i] = center[i]; while (pos[i] < 0) pos[i] += dimensions[i]; } unsigned int i = 0; // FIXME :maybe we shoud search the other way? 
while ((i < D) && (0 == pos[i])) i++; if (D == i) { md_copy2(D, dimensions, str1, dst, str2, src, size); return; } if (dst == src) { assert(str1 == str2); md_circ_shift_inpl2(D, dimensions, pos, str1, dst, size); return; } long shift = pos[i]; assert(shift != 0); long dim1[D]; long dim2[D]; md_copy_dims(D, dim1, dimensions); md_copy_dims(D, dim2, dimensions); dim1[i] = shift; dim2[i] = dimensions[i] - shift; assert((dim1[i] >= 0) && (dim2[i] >= 0)); pos[i] = 0; //printf("%d: %ld %ld %d\n", i, dim1[i], dim2[i], sizeof(dimensions)); md_circ_shift2(D, dim1, pos, str1, dst, str2, src + dim2[i] * str2[i], size); md_circ_shift2(D, dim2, pos, str1, dst + dim1[i] * str1[i], str2, src, size); } /** * Circularly shift array (without strides) * * dst[mod(i + center)] = src[i] * */ void md_circ_shift(unsigned int D, const long dimensions[D], const long center[D], void* dst, const void* src, size_t size) { long strides[D]; md_calc_strides(D, strides, dimensions, size); md_circ_shift2(D, dimensions, center, strides, dst, strides, src, size); } /** * Circularly extend array (with strides) * */ void md_circ_ext2(unsigned int D, const long dims1[D], const long strs1[D], void* dst, const long dims2[D], const long strs2[D], const void* src, size_t size) { long ext[D]; for (unsigned int i = 0; i < D; i++) { ext[i] = dims1[i] - dims2[i]; assert(ext[i] >= 0); assert(ext[i] <= dims2[i]); } unsigned int i = 0; // FIXME :maybe we shoud search the other way? 
while ((i < D) && (0 == ext[i])) i++; if (D == i) { md_copy2(D, dims1, strs1, dst, strs2, src, size); return; } long dims1_crop[D]; long dims2_crop[D]; long ext_dims[D]; md_copy_dims(D, dims1_crop, dims1); md_copy_dims(D, dims2_crop, dims2); md_copy_dims(D, ext_dims, dims1); dims1_crop[i] = dims2[i]; dims2_crop[i] = ext[i]; ext_dims[i] = ext[i]; ext[i] = 0; //printf("%d: %ld %ld %d\n", i, dim1[i], dim2[i], sizeof(dimensions)); md_circ_ext2(D, dims1_crop, strs1, dst, dims2, strs2, src, size); md_circ_ext2(D, ext_dims, strs1, dst + dims2[i] * strs1[i], dims2_crop, strs2, src, size); } /** * Circularly extend array (without strides) * */ void md_circ_ext(unsigned int D, const long dims1[D], void* dst, const long dims2[D], const void* src, size_t size) { md_circ_ext2(D, dims1, MD_STRIDES(D, dims1, size), dst, dims2, MD_STRIDES(D, dims2, size), src, size); } /** * Periodically extend array (with strides) * */ void md_periodic2(unsigned int D, const long dims1[D], const long strs1[D], void* dst, const long dims2[D], const long strs2[D], const void* src, size_t size) { long dims1B[2 * D]; long strs1B[2 * D]; long strs2B[2 * D]; for (unsigned int i = 0; i < D; i++) { assert(0 == dims1[i] % dims2[i]); // blocks dims1B[2 * i + 0] = dims2[i]; strs1B[2 * i + 0] = strs1[i]; strs2B[2 * i + 0] = strs2[i]; // periodic copies dims1B[2 * i + 0] = dims1[i] / dims2[i]; strs1B[2 * i + 0] = strs1[i] * dims2[i]; strs2B[2 * i + 0] = 0; } md_copy2(D, dims1B, strs1B, dst, strs2B, src, size); } /** * Periodically extend array (without strides) * */ void md_periodic(unsigned int D, const long dims1[D], void* dst, const long dims2[D], const void* src, size_t size) { md_periodic2(D, dims1, MD_STRIDES(D, dims1, size), dst, dims2, MD_STRIDES(D, dims2, size), src, size); } /** * Allocate CPU memory * * return pointer to CPU memory */ void* md_alloc(unsigned int D, const long dimensions[D], size_t size) { return xmalloc(md_calc_size(D, dimensions) * size); } /** * Allocate CPU memory and clear * * 
return pointer to CPU memory */ void* md_calloc(unsigned int D, const long dimensions[D], size_t size) { void* ptr = md_alloc(D, dimensions, size); md_clear(D, dimensions, ptr, size); return ptr; } #ifdef USE_CUDA /** * Allocate GPU memory * * return pointer to GPU memory */ void* md_alloc_gpu(unsigned int D, const long dimensions[D], size_t size) { return cuda_malloc(md_calc_size(D, dimensions) * size); } /** * Allocate GPU memory and copy from CPU pointer * * return pointer to GPU memory */ void* md_gpu_move(unsigned int D, const long dims[D], const void* ptr, size_t size) { if (NULL == ptr) return NULL; void* gpu_ptr = md_alloc_gpu(D, dims, size); md_copy(D, dims, gpu_ptr, ptr, size); return gpu_ptr; } #endif /** * Allocate memory on the same device (CPU/GPU) place as ptr * * return pointer to CPU memory if ptr is in CPU or to GPU memory if ptr is in GPU */ void* md_alloc_sameplace(unsigned int D, const long dimensions[D], size_t size, const void* ptr) { #ifdef USE_CUDA return (cuda_ondevice(ptr) ? md_alloc_gpu : md_alloc)(D, dimensions, size); #else assert(0 != ptr); return md_alloc(D, dimensions, size); #endif } /** * Check whether memory is at sameplace */ bool md_is_sameplace(const void* ptr1, const void* ptr2) { assert(NULL != ptr1); assert(NULL != ptr2); #ifdef USE_CUDA return cuda_ondevice(ptr1) == cuda_ondevice(ptr2); #else return true; #endif } /** * Free CPU/GPU memory * */ void md_free(const void* ptr) { #ifdef USE_CUDA if (cuda_ondevice(ptr)) cuda_free((void*)ptr); else #endif xfree(ptr); } int md_max_idx(unsigned long flags) { int i = -1; for ( ; 0 != flags; i++) flags /= 2; return i; } int md_min_idx(unsigned long flags) { return ffsl(flags) - 1; }
crop_and_resize.c
#include <TH/TH.h>
#include <stdio.h>
#include <stdlib.h>   /* FIX: exit() was used without a declaration */
#include <math.h>


/*
 * Crop boxes out of a batch of images and bilinearly resize each crop to
 * (crop_height x crop_width).  Layouts: image NCHW, crops (box, depth, y, x).
 * Boxes are [y1, x1, y2, x2] in normalized [0, 1] image coordinates.
 * Pixels sampled outside the image get 'extrapolation_value'.
 * Note: 'corps_data' is a long-standing typo for "crops_data" (kept for diff
 * minimality; C callers are unaffected by parameter names).
 */
void CropAndResizePerBox(
    const float * image_data,
    const int batch_size,
    const int depth,
    const int image_height,
    const int image_width,

    const float * boxes_data,
    const int * box_index_data,
    const int start_box,
    const int limit_box,

    float * corps_data,
    const int crop_height,
    const int crop_width,
    const float extrapolation_value
) {
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    int b;
    // Boxes are independent, so they can be processed in parallel.
    #pragma omp parallel for
    for (b = start_box; b < limit_box; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        // Map crop pixel indices to source image coordinates.
        const float height_scale =
            (crop_height > 1)
                ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            // Degenerate crop (single row): sample the box center.
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);

            if (in_y < 0 || in_y > image_height - 1)
            {
                // Entire output row falls outside the image: extrapolate.
                for (int x = 0; x < crop_width; ++x)
                {
                    for (int d = 0; d < depth; ++d)
                    {
                        // crops(b, y, x, d) = extrapolation_value;
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                }
                continue;
            }

            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    for (int d = 0; d < depth; ++d)
                    {
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                    continue;
                }

                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                // Bilinear interpolation of the four neighboring pixels,
                // per channel.
                for (int d = 0; d < depth; ++d)
                {
                    const float *pimage = image_data + b_in * image_elements + d * image_channel_elements;

                    const float top_left = pimage[top_y_index * image_width + left_x_index];
                    const float top_right = pimage[top_y_index * image_width + right_x_index];
                    const float bottom_left = pimage[bottom_y_index * image_width + left_x_index];
                    const float bottom_right = pimage[bottom_y_index * image_width + right_x_index];

                    const float top = top_left + (top_right - top_left) * x_lerp;
                    const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;

                    corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = top + (bottom - top) * y_lerp;
                }
            }   // end for x
        }   // end for y
    }   // end for b
}


/*
 * Forward pass: resize 'crops' to (num_boxes, depth, crop_height, crop_width)
 * and fill it by cropping/resizing 'image' per box.
 */
void crop_and_resize_forward(
    THFloatTensor * image,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    const float extrapolation_value,
    const int crop_height,
    const int crop_width,
    THFloatTensor * crops
) {
    //const int batch_size = image->size[0];
    //const int depth = image->size[1];
    //const int image_height = image->size[2];
    //const int image_width = image->size[3];
    //const int num_boxes = boxes->size[0];

    const int batch_size = THFloatTensor_size(image, 0);
    const int depth = THFloatTensor_size(image, 1);
    const int image_height = THFloatTensor_size(image, 2);
    const int image_width = THFloatTensor_size(image, 3);

    const int num_boxes = THFloatTensor_size(boxes, 0);

    // init output space
    THFloatTensor_resize4d(crops, num_boxes, depth, crop_height, crop_width);
    THFloatTensor_zero(crops);

    // crop_and_resize for each box
    CropAndResizePerBox(
        THFloatTensor_data(image),
        batch_size,
        depth,
        image_height,
        image_width,

        THFloatTensor_data(boxes),
        THIntTensor_data(box_index),
        0,
        num_boxes,

        THFloatTensor_data(crops),
        crop_height,
        crop_width,
        extrapolation_value
    );
}


/*
 * Backward pass: scatter 'grads' (w.r.t. the crops) back into 'grads_image'
 * using the transposed bilinear weights.  Not parallelized: different boxes
 * may accumulate into the same image pixels.
 */
void crop_and_resize_backward(
    THFloatTensor * grads,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    THFloatTensor * grads_image // resize to [bsize, c, hc, wc]
)
{
    // shape
    //const int batch_size = grads_image->size[0];
    //const int depth = grads_image->size[1];
    //const int image_height = grads_image->size[2];
    //const int image_width = grads_image->size[3];

    //const int num_boxes = grads->size[0];
    //const int crop_height = grads->size[2];
    //const int crop_width = grads->size[3];

    const int batch_size = THFloatTensor_size(grads_image, 0);
    const int depth = THFloatTensor_size(grads_image, 1);
    const int image_height = THFloatTensor_size(grads_image, 2);
    const int image_width = THFloatTensor_size(grads_image, 3);

    const int num_boxes = THFloatTensor_size(grads, 0);
    const int crop_height = THFloatTensor_size(grads, 2);
    const int crop_width = THFloatTensor_size(grads, 3);

    // n_elements
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    // init output space
    THFloatTensor_zero(grads_image);

    // data pointer
    const float * grads_data = THFloatTensor_data(grads);
    const float * boxes_data = THFloatTensor_data(boxes);
    const int * box_index_data = THIntTensor_data(box_index);
    float * grads_image_data = THFloatTensor_data(grads_image);

    for (int b = 0; b < num_boxes; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        const float height_scale =
            (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                              : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);

            // Out-of-image samples received the extrapolation value in the
            // forward pass; their gradient w.r.t. the image is zero.
            if (in_y < 0 || in_y > image_height - 1)
            {
                continue;
            }

            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    continue;
                }

                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                for (int d = 0; d < depth; ++d)
                {
                    float *pimage = grads_image_data + b_in * image_elements + d * image_channel_elements;
                    const float grad_val = grads_data[crop_elements * b + channel_elements * d + y * crop_width + x];

                    // Distribute the gradient with the bilinear weights.
                    const float dtop = (1 - y_lerp) * grad_val;
                    pimage[top_y_index * image_width + left_x_index] += (1 - x_lerp) * dtop;
                    pimage[top_y_index * image_width + right_x_index] += x_lerp * dtop;

                    const float dbottom = y_lerp * grad_val;
                    pimage[bottom_y_index * image_width + left_x_index] += (1 - x_lerp) * dbottom;
                    pimage[bottom_y_index * image_width + right_x_index] += x_lerp * dbottom;
                }   // end d
            }   // end x
        }   // end y
    }   // end b
}
t_initialize.c
//-----------------------BEGIN NOTICE -- DO NOT EDIT-----------------------
// NASA Goddard Space Flight Center Land Information System (LIS) v7.2
//
// Copyright (c) 2015 United States Government as represented by the
// Administrator of the National Aeronautics and Space Administration.
// All Rights Reserved.
//-------------------------END NOTICE -- DO NOT EDIT-----------------------
#include <stdlib.h>        /* malloc */
#include <unistd.h>        /* sysconf */

#include "gpt.h"

/*
** Array (1 per thread) of linked lists of timers, and last timer in each list
*/
struct node **timers = NULL;
struct node **last = NULL;

long ticks_per_sec;        /* clock ticks per second (from sysconf) */

/*
** Define lock arrays depending upon the type of threading done
*/
#if ( defined THREADED_OMP )
omp_lock_t lock;
#elif ( defined THREADED_PTHREADS )
pthread_mutex_t t_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_t *threadid;       /* maps physical pthread ids to logical ids */
#endif

float *overhead;           /* wallclock estimate of timer overhead */
int *max_indent_level;     /* maximum indentation level */
int numthreads = 1;        /* number of threads. 1 is for no threading */
Boolean t_initialized = false;     /* whether t_initialize has been called */
Boolean wallenabled = false;       /* wallclock timer stats enabled */
Boolean usrsysenabled = false;     /* usr & sys timer stats enabled */
Boolean pclenabled = false;        /* enable PCL library */
Boolean pcl_cyclesenabled = false; /* enable PCL cycle count */
int pcl_cyclesindex = -1;          /* index for PCL cycle count */

/* Table of all events the library knows about; 'enabled' may be toggled
** by t_setoption before t_initialize is called. */
struct PossibleEvent possible_event[] = {
  {usrsys,               true,  "Usr Sys   "},
  {wall,                 true,  "Wallclock "},
#ifdef HAVE_PCL
  {pcl_start,            false, "          "},  /* bracket PCL entries */
  {pcl_l1dcache_miss,    false, "l1 D miss "},
  {pcl_l2cache_miss,     false, "L2 miss   "},
  {pcl_cycles,           false, "Cycles    "},
  {pcl_elapsed_cycles,   false, "E-Cycles  "},
  {pcl_fp_instr,         false, "FP instr  "},
  {pcl_loadstore_instr,  false, "L/S instr "},
  {pcl_instr,            false, "Instruct  "},
  {pcl_stall,            false, "Stall     "},
  {pcl_end,              false, "          "},  /* bracket PCL entries */
#endif
};

struct Event **event = NULL;   /* events actually enabled (grown at init) */
int nevent = 0;                /* number of enabled events */
int npossible = sizeof (possible_event) / sizeof (struct PossibleEvent);

/*
** Needed by PCL library: otherwise unused
*/
PCL_DESCR_TYPE *descr;             /* one PCL handle per thread */
int counter_list[PCL_COUNTER_MAX]; /* PCL counters requested */
int ncounter = 0;                  /* number of PCL counters */
PCL_CNT_TYPE *overhead_pcl;        /* overhead counter (cycles) */

/*
** t_initialize (): Initialization routine must be called from single-threaded
** region before any other timing routines may be called.  The need for this
** routine could be eliminated if not targetting timing library for threaded
** capability.
**
** return value: 0 (success) or -1 (failure)
*/

int t_initialize ()
{
  int n;             /* index */
  int nbytes;        /* number of bytes for malloc */
  int ret;           /* return code */

  /*
  ** Determine number of ticks per second for conversion use by other t_pr(), t_stamp()
  */

  if ((ticks_per_sec = sysconf (_SC_CLK_TCK)) == -1)
    return t_error ("t_initialize: token _SC_CLK_TCK is not defined\n");

#if ( ! defined DISABLE_TIMERS )
  if (t_initialized)
    return t_error ("t_initialize has already been called\n");

#if ( defined THREADED_OMP )

  /*
  ** OMP: must call init_lock before using the lock (get_thread_num())
  */

  omp_init_lock (&lock);
  numthreads = omp_get_max_threads();

#elif ( defined THREADED_PTHREADS )

  numthreads = MAX_THREADS;

#endif

  /*
  ** Allocate space for global arrays
  */

  nbytes = numthreads * sizeof (struct node *);
  if ((timers = (struct node **) malloc (nbytes)) == 0)
    return t_error ("malloc failure: %d items\n", numthreads);

  if ((last = (struct node **) malloc (nbytes)) == 0)
    return t_error ("malloc failure: %d items\n", numthreads);

  nbytes = numthreads * sizeof (float);
  if ((overhead = (float *) malloc (nbytes)) == 0)
    return t_error ("malloc failure: %d items\n", numthreads);

  nbytes = numthreads * sizeof (PCL_CNT_TYPE);
  if ((overhead_pcl = (PCL_CNT_TYPE *) malloc (nbytes)) == 0)
    return t_error ("malloc failure: %d items\n", numthreads);

  nbytes = numthreads * sizeof (int);
  if ((max_indent_level = (int *) malloc (nbytes)) == 0)
    return t_error ("malloc failure for %d items\n", numthreads);

  /*
  ** Initialize array values
  */

  for (n = 0; n < numthreads; n++) {
    timers[n] = 0;
    last[n] = 0;
    overhead[n] = 0.;
    overhead_pcl[n] = 0;
    max_indent_level[n] = 0;
  }

#ifdef THREADED_PTHREADS

  /*
  ** In the pthreads case, we must manage the threadid array which maps
  ** physical thread id's to logical id's
  */

  nbytes = numthreads * sizeof (pthread_t);
  if ((threadid = (pthread_t *) malloc (nbytes)) == 0)
    return t_error ("malloc failure for %d items\n", numthreads);

  /*
  ** Reset numthreads to 1 and define the threadid array now that initialization
  ** is done.
  */

  threadid[0] = pthread_self ();
  numthreads = 1;

#endif

  if (get_thread_num () > 0)
    return t_error ("t_initialize: should only be called by master thread\n");

  /* Build the enabled-event list from the possible_event table. */
  for (n = 0; n < npossible; n++) {
    if (possible_event[n].enabled) {

      if (possible_event[n].name == usrsys)
	usrsysenabled = true;

      if (possible_event[n].name == wall)
	wallenabled = true;

      if ((event = realloc (event, (nevent+1) * sizeof (struct Event *))) == NULL)
	return t_error ("realloc failure\n");

      /* NOTE(review): message says "realloc failure" but this is a malloc */
      if ((event[nevent] = malloc (sizeof (struct Event))) == NULL)
	return t_error ("realloc failure\n");

      event[nevent]->name = possible_event[n].name;
      /* assumes Event.string is large enough for the fixed-width labels
	 above — confirm against gpt.h */
      strcpy (event[nevent]->string, possible_event[n].string);

#ifdef HAVE_PCL

      /*
      ** Set up PCL stuff based on what t_setoption has provided.
      */

      if (event[nevent]->name > pcl_start && event[nevent]->name < pcl_end) {
	pclenabled = true;
	event[nevent]->index = ncounter;

	switch (possible_event[n].name) {
	case pcl_l1dcache_miss:
	  counter_list[ncounter++] = PCL_L1DCACHE_MISS;
	  break;
	case pcl_l2cache_miss:
	  counter_list[ncounter++] = PCL_L2CACHE_MISS;
	  break;
	case pcl_cycles:
	  pcl_cyclesindex = ncounter;
	  pcl_cyclesenabled = true;
	  counter_list[ncounter++] = PCL_CYCLES;
	  break;
	case pcl_elapsed_cycles:
	  counter_list[ncounter++] = PCL_ELAPSED_CYCLES;
	  break;
	case pcl_fp_instr:
	  counter_list[ncounter++] = PCL_FP_INSTR;
	  break;
	case pcl_loadstore_instr:
	  counter_list[ncounter++] = PCL_LOADSTORE_INSTR;
	  break;
	case pcl_instr:
	  counter_list[ncounter++] = PCL_INSTR;
	  break;
	case pcl_stall:
	  counter_list[ncounter++] = PCL_STALL;
	  break;
	default:
	  break;
	}
      }
#endif

      ++nevent;
    }
  }

#ifdef HAVE_PCL
  if (ncounter > 0) {
    int thread;        /* thread number */

    nbytes = numthreads * sizeof (PCL_DESCR_TYPE);
    if ((descr = (PCL_DESCR_TYPE *) malloc (nbytes)) == 0)
      return t_error ("malloc failure: %d items\n", numthreads);

    /*
    ** PCLinit must be called on a per-thread basis.  Therefore must make the call here
    ** rather than in t_initialize.  null timer list flags not initialized.
    ** Also, the critical section is necessary because PCLstart appears not to be
    ** thread-safe.
    **
    ** NOTE(review): 'return t_error(...)' inside an 'omp parallel for' is a
    ** branch out of a structured block, which is non-conforming OpenMP —
    ** confirm and consider recording the error and returning after the loop.
    */

#pragma omp parallel for

    for (thread = 0; thread < numthreads; thread++) {
      unsigned int flags;           /* mode flags needed by PCL */

#pragma omp critical
      {
	if ((ret = PCLinit (&descr[thread])) != PCL_SUCCESS)
	  return t_error ("unable to allocate PCL handle for thread %d. %s\n",
			  thread, t_pclstr (ret));

	/*
	** Always count user mode only
	*/

	flags = PCL_MODE_USER;

	if ((ret = PCLquery (descr[thread], counter_list, ncounter, flags)) != PCL_SUCCESS)
	  return t_error ("Bad return from PCLquery thread %d: %s\n", thread, t_pclstr (ret));

	if ((ret = PCLstart (descr[thread], counter_list, ncounter, flags)) != PCL_SUCCESS)
	  return t_error ("PCLstart failed thread=%d: %s\n", thread, t_pclstr (ret));
      }
    }
  }
#endif

  t_initialized = true;
#endif
  return 0;
}
vect-simd-clone-7.c
/* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

#include "tree-vect.h"

#ifndef N
#define N 1024
#endif

int a[N];
long long int b[N];
short c[N];

/* Two SIMD clone variants are requested: a general one, and one where b is
   uniform across lanes and c advances linearly with step 3 — matching the
   two call sites in bar().  */
#pragma omp declare simd
#pragma omp declare simd uniform(b) linear(c:3)
__attribute__((noinline)) short
foo (int a, long long int b, int c)
{
  return a + b + c;
}

/* x == 0 exercises the general clone; x != 0 exercises the
   uniform/linear clone (b is the loop-invariant x, c is i * 3).  */
__attribute__((noinline, noclone)) void
bar (int x)
{
  int i;
  if (x == 0)
    {
      #pragma omp simd
      for (i = 0; i < N; i++)
	c[i] = foo (a[i], b[i], c[i]);
    }
  else
    {
      #pragma omp simd
      for (i = 0; i < N; i++)
	c[i] = foo (a[i], x, i * 3);
    }
}

/* Seed the arrays with easily-checkable linear patterns.  */
__attribute__((noinline, noclone)) void
baz (void)
{
  int i;
  for (i = 0; i < N; i++)
    {
      a[i] = 2 * i;
      b[i] = -7 * i + 6;
      c[i] = (i & 31) << 4;
    }
}

int
main ()
{
  int i;
  check_vect ();
  baz ();
  bar (0);
  /* After bar(0): c[i] = 2i + (6-7i) + ((i&31)<<4) = 6 - 5i + ((i&31)<<4).  */
  for (i = 0; i < N; i++)
    if (a[i] != 2 * i || b[i] != 6 - 7 * i
	|| c[i] != 6 - 5 * i + ((i & 31) << 4))
      abort ();
    else
      a[i] = c[i];
  bar (17);
  /* After bar(17): c[i] = a[i] + 17 + 3i = 23 - 2i + ((i&31)<<4).  */
  for (i = 0; i < N; i++)
    if (a[i] != 6 - 5 * i + ((i & 31) << 4)
	|| b[i] != 6 - 7 * i
	|| c[i] != 23 - 2 * i + ((i & 31) << 4))
      abort ();
  return 0;
}
kmp_sch_simd_guided.c
// RUN: %libomp-compile-and-run // REQUIRES: openmp-4.5 /* Test for the 'schedule(simd:guided)' clause. Compiler needs to generate a dynamic dispatching and pass the schedule value 46 to the OpenMP RTL. Test uses numerous loop parameter combinations. */ #include <stdio.h> #include <omp.h> #if defined(WIN32) || defined(_WIN32) #include <windows.h> #define delay() Sleep(1); #else #include <unistd.h> #define delay() usleep(10); #endif // uncomment for debug diagnostics: //#define DEBUG #define SIMD_LEN 4 // --------------------------------------------------------------------------- // Various definitions copied from OpenMP RTL enum sched { kmp_sch_static_balanced_chunked = 45, kmp_sch_guided_simd = 46, kmp_sch_runtime_simd = 47, }; typedef unsigned u32; typedef long long i64; typedef unsigned long long u64; typedef struct { int reserved_1; int flags; int reserved_2; int reserved_3; char *psource; } id; extern int __kmpc_global_thread_num(id*); extern void __kmpc_barrier(id*, int gtid); extern void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int); extern void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64); extern int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*); extern int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*); // End of definitions copied from OpenMP RTL. 
// --------------------------------------------------------------------------- static id loc = {0, 2, 0, 0, ";file;func;0;0;;"}; // --------------------------------------------------------------------------- int run_loop_64(i64 loop_lb, i64 loop_ub, i64 loop_st, int loop_chunk) { int err = 0; static int volatile loop_sync = 0; i64 lb; // Chunk lower bound i64 ub; // Chunk upper bound i64 st; // Chunk stride int rc; int tid = omp_get_thread_num(); int gtid = tid; int last; #if DEBUG printf("run_loop_<%d>(lb=%d, ub=%d, st=%d, ch=%d)\n", (int)sizeof(i64), gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, loop_chunk); #endif // Don't test degenerate cases that should have been discovered by codegen if (loop_st == 0) return 0; if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub) return 0; __kmpc_dispatch_init_8(&loc, gtid, kmp_sch_guided_simd, loop_lb, loop_ub, loop_st, loop_chunk); if (tid == 0) { // Let the master thread handle the chunks alone int chunk; // No of current chunk i64 next_lb; // Lower bound of the next chunk i64 last_ub; // Upper bound of the last processed chunk u64 cur; // Number of interations in current chunk u64 max; // Max allowed iterations for current chunk int undersized = 0; chunk = 0; next_lb = loop_lb; max = (loop_ub - loop_lb) / loop_st + 1; // The first chunk can consume all iterations while (__kmpc_dispatch_next_8(&loc, gtid, &last, &lb, &ub, &st)) { ++ chunk; #if DEBUG printf("chunk=%d, lb=%d, ub=%d\n", chunk, (int)lb, (int)ub); #endif // Check if previous chunk (it is not the final chunk) is undersized if (undersized) { printf("Error with chunk %d\n", chunk); err++; } // Check lower and upper bounds if (lb != next_lb) { printf("Error with lb %d, %d, ch %d\n", (int)lb, (int)next_lb, chunk); err++; } if (loop_st > 0) { if (!(ub <= loop_ub)) { printf("Error with ub %d, %d, ch %d\n", (int)ub, (int)loop_ub, chunk); err++; } if (!(lb <= ub)) { printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk); err++; } } else { if (!(ub 
>= loop_ub)) { printf("Error with ub %d, %d, %d\n", (int)ub, (int)loop_ub, chunk); err++; } if (!(lb >= ub)) { printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk); err++; } }; // if // Stride should not change if (!(st == loop_st)) { printf("Error with st %d, %d, ch %d\n", (int)st, (int)loop_st, chunk); err++; } cur = (ub - lb) / loop_st + 1; // Guided scheduling uses FP computations, so current chunk may // be a bit bigger (+1) than allowed maximum if (!(cur <= max + 1)) { printf("Error with iter %d, %d\n", cur, max); err++; } // Update maximum for the next chunk if (cur < max) max = cur; next_lb = ub + loop_st; last_ub = ub; undersized = (cur < loop_chunk); }; // while // Must have at least one chunk if (!(chunk > 0)) { printf("Error with chunk %d\n", chunk); err++; } // Must have the right last iteration index if (loop_st > 0) { if (!(last_ub <= loop_ub)) { printf("Error with last1 %d, %d, ch %d\n", (int)last_ub, (int)loop_ub, chunk); err++; } if (!(last_ub + loop_st > loop_ub)) { printf("Error with last2 %d, %d, %d, ch %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk); err++; } } else { if (!(last_ub >= loop_ub)) { printf("Error with last1 %d, %d, ch %d\n", (int)last_ub, (int)loop_ub, chunk); err++; } if (!(last_ub + loop_st < loop_ub)) { printf("Error with last2 %d, %d, %d, ch %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk); err++; } }; // if // Let non-master threads go loop_sync = 1; } else { int i; // Workers wait for master thread to finish, then call __kmpc_dispatch_next for (i = 0; i < 1000000; ++ i) { if (loop_sync != 0) { break; }; // if }; // for i while (loop_sync == 0) { delay(); }; // while // At this moment we do not have any more chunks -- all the chunks already // processed by master thread rc = __kmpc_dispatch_next_8(&loc, gtid, &last, &lb, &ub, &st); if (rc) { printf("Error return value\n"); err++; } }; // if __kmpc_barrier(&loc, gtid); if (tid == 0) { loop_sync = 0; // Restore original state #if DEBUG 
printf("run_loop_64(): at the end\n"); #endif }; // if __kmpc_barrier(&loc, gtid); return err; } // run_loop // --------------------------------------------------------------------------- int run_loop_32(int loop_lb, int loop_ub, int loop_st, int loop_chunk) { int err = 0; static int volatile loop_sync = 0; int lb; // Chunk lower bound int ub; // Chunk upper bound int st; // Chunk stride int rc; int tid = omp_get_thread_num(); int gtid = tid; int last; #if DEBUG printf("run_loop_<%d>(lb=%d, ub=%d, st=%d, ch=%d)\n", (int)sizeof(int), gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, loop_chunk); #endif // Don't test degenerate cases that should have been discovered by codegen if (loop_st == 0) return 0; if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub) return 0; __kmpc_dispatch_init_4(&loc, gtid, kmp_sch_guided_simd, loop_lb, loop_ub, loop_st, loop_chunk); if (tid == 0) { // Let the master thread handle the chunks alone int chunk; // No of current chunk int next_lb; // Lower bound of the next chunk int last_ub; // Upper bound of the last processed chunk u64 cur; // Number of interations in current chunk u64 max; // Max allowed iterations for current chunk int undersized = 0; chunk = 0; next_lb = loop_lb; max = (loop_ub - loop_lb) / loop_st + 1; // The first chunk can consume all iterations while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) { ++ chunk; #if DEBUG printf("chunk=%d, lb=%d, ub=%d\n", chunk, (int)lb, (int)ub); #endif // Check if previous chunk (it is not the final chunk) is undersized if (undersized) { printf("Error with chunk %d\n", chunk); err++; } // Check lower and upper bounds if (lb != next_lb) { printf("Error with lb %d, %d, ch %d\n", (int)lb, (int)next_lb, chunk); err++; } if (loop_st > 0) { if (!(ub <= loop_ub)) { printf("Error with ub %d, %d, ch %d\n", (int)ub, (int)loop_ub, chunk); err++; } if (!(lb <= ub)) { printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk); err++; } } else { if (!(ub >= loop_ub)) { 
printf("Error with ub %d, %d, %d\n", (int)ub, (int)loop_ub, chunk); err++; } if (!(lb >= ub)) { printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk); err++; } }; // if // Stride should not change if (!(st == loop_st)) { printf("Error with st %d, %d, ch %d\n", (int)st, (int)loop_st, chunk); err++; } cur = (ub - lb) / loop_st + 1; // Guided scheduling uses FP computations, so current chunk may // be a bit bigger (+1) than allowed maximum if (!(cur <= max + 1)) { printf("Error with iter %d, %d\n", cur, max); err++; } // Update maximum for the next chunk if (cur < max) max = cur; next_lb = ub + loop_st; last_ub = ub; undersized = (cur < loop_chunk); }; // while // Must have at least one chunk if (!(chunk > 0)) { printf("Error with chunk %d\n", chunk); err++; } // Must have the right last iteration index if (loop_st > 0) { if (!(last_ub <= loop_ub)) { printf("Error with last1 %d, %d, ch %d\n", (int)last_ub, (int)loop_ub, chunk); err++; } if (!(last_ub + loop_st > loop_ub)) { printf("Error with last2 %d, %d, %d, ch %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk); err++; } } else { if (!(last_ub >= loop_ub)) { printf("Error with last1 %d, %d, ch %d\n", (int)last_ub, (int)loop_ub, chunk); err++; } if (!(last_ub + loop_st < loop_ub)) { printf("Error with last2 %d, %d, %d, ch %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk); err++; } }; // if // Let non-master threads go loop_sync = 1; } else { int i; // Workers wait for master thread to finish, then call __kmpc_dispatch_next for (i = 0; i < 1000000; ++ i) { if (loop_sync != 0) { break; }; // if }; // for i while (loop_sync == 0) { delay(); }; // while // At this moment we do not have any more chunks -- all the chunks already // processed by the master thread rc = __kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st); if (rc) { printf("Error return value\n"); err++; } }; // if __kmpc_barrier(&loc, gtid); if (tid == 0) { loop_sync = 0; // Restore original state #if DEBUG printf("run_loop<>(): at 
the end\n"); #endif }; // if __kmpc_barrier(&loc, gtid); return err; } // run_loop // --------------------------------------------------------------------------- int run_64(int num_th) { int err = 0; #pragma omp parallel num_threads(num_th) { int chunk; i64 st, lb, ub; for (chunk = SIMD_LEN; chunk <= 3*SIMD_LEN; chunk += SIMD_LEN) { for (st = 1; st <= 3; ++ st) { for (lb = -3 * num_th * st; lb <= 3 * num_th * st; ++ lb) { for (ub = lb; ub < lb + num_th * (chunk+1) * st; ++ ub) { err += run_loop_64(lb, ub, st, chunk); err += run_loop_64(ub, lb, -st, chunk); }; // for ub }; // for lb }; // for st }; // for chunk } return err; } // run_all int run_32(int num_th) { int err = 0; #pragma omp parallel num_threads(num_th) { int chunk, st, lb, ub; for (chunk = SIMD_LEN; chunk <= 3*SIMD_LEN; chunk += SIMD_LEN) { for (st = 1; st <= 3; ++ st) { for (lb = -3 * num_th * st; lb <= 3 * num_th * st; ++ lb) { for (ub = lb; ub < lb + num_th * (chunk+1) * st; ++ ub) { err += run_loop_32(lb, ub, st, chunk); err += run_loop_32(ub, lb, -st, chunk); }; // for ub }; // for lb }; // for st }; // for chunk } return err; } // run_all // --------------------------------------------------------------------------- int main() { int n, err = 0; for (n = 1; n <= 4; ++ n) { err += run_32(n); err += run_64(n); }; // for n if (err) printf("failed with %d errors\n", err); else printf("passed\n"); return err; }
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(32*t2-Nz,4)),4*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(16*t1+Ny+29,4)),floord(32*t2+Ny+28,4)),floord(32*t1-32*t2+Nz+Ny+27,4));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(32*t2-Nz-508,512)),ceild(4*t3-Ny-508,512));t4<=min(min(min(min(floord(4*t3+Nx,512),floord(Nt+Nx-4,512)),floord(16*t1+Nx+29,512)),floord(32*t2+Nx+28,512)),floord(32*t1-32*t2+Nz+Nx+27,512));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),4*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),4*t3+2),512*t4+510),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } 
} } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
convolution_3x3_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt) { const int packn = csrr_vlenb() / 2; // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] 
= tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = pb-pa-inch/pa-64-outch/pb kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)2u * packn * packn, packn * packn); for (int q = 0; q + (packn - 1) < outch; q += packn) { Mat g0 = kernel_tm_packn.channel(q / packn); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + (packn - 1) < inch; p += packn) { for (int i = 0; i < packn; i++) { for (int j = 0; j < packn; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd64_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, 
-2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); // NOTE c99 variable length array __fp16 tmp[8][8][packn]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * packn; for (int m = 0; m < 8; m++) { vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl); vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _r06 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _r07 = vle16_v_f16m1(r0 + packn * 7, vl); vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r00, _r06, vl), 5.25f, vfsub_vv_f16m1(_r04, _r02, vl), vl); vfloat16m1_t _tmp7m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r07, _r01, vl), 5.25f, vfsub_vv_f16m1(_r03, _r05, vl), vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[7][m], _tmp7m, vl); vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r02, _r06, vl), -4.25f, _r04, vl); vfloat16m1_t _tmp12b = 
vfmacc_vf_f16m1(vfadd_vv_f16m1(_r01, _r05, vl), -4.25f, _r03, vl); vfloat16m1_t _tmp1m = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _tmp2m = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl); vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl); vfloat16m1_t _tmp3m = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _tmp4m = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_r06, 4.f, vfmacc_vf_f16m1(_r02, -1.25f, _r04, vl), vl); vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl); vfloat16m1_t _tmp5m = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl); vfloat16m1_t _tmp6m = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); vse16_v_f16m1(tmp[6][m], _tmp6m, vl); r0 += w * packn; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * packn; __fp16* r0_tm_1 = r0_tm_0 + tiles * packn; __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2; __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3; __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4; __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5; __fp16* r0_tm_6 = r0_tm_0 + tiles * packn * 6; __fp16* r0_tm_7 = r0_tm_0 + tiles * packn * 7; for (int m = 0; m < 8; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl); vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl); vfloat16m1_t _r0tm0 = 
vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f16m1(_tmp04, _tmp02, vl), vl); vfloat16m1_t _r0tm7 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f16m1(_tmp03, _tmp05, vl), vl); vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl); vfloat16m1_t _tmp12b = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl); vfloat16m1_t _r0tm1 = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _r0tm2 = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl); vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl); vfloat16m1_t _r0tm3 = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _r0tm4 = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_tmp06, 4.f, vfmacc_vf_f16m1(_tmp02, -1.25f, _tmp04, vl), vl); vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl); vfloat16m1_t _r0tm5 = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl); vfloat16m1_t _r0tm6 = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl); vse16_v_f16m1(r0_tm_0, _r0tm0, vl); vse16_v_f16m1(r0_tm_1, _r0tm1, vl); vse16_v_f16m1(r0_tm_2, _r0tm2, vl); vse16_v_f16m1(r0_tm_3, _r0tm3, vl); vse16_v_f16m1(r0_tm_4, _r0tm4, vl); vse16_v_f16m1(r0_tm_5, _r0tm5, vl); vse16_v_f16m1(r0_tm_6, _r0tm6, vl); vse16_v_f16m1(r0_tm_7, _r0tm7, vl); r0_tm_0 += tiles * packn * 8; r0_tm_1 += tiles * packn * 8; r0_tm_2 += tiles * packn * 8; r0_tm_3 += tiles * packn * 8; r0_tm_4 += tiles * packn * 8; r0_tm_5 += tiles * packn * 8; r0_tm_6 += tiles * packn * 8; r0_tm_7 += tiles * packn * 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, 
elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tmpptr = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr[4] = r0[l + packn * 4]; tmpptr[5] = r0[l + packn * 5]; tmpptr[6] = r0[l + packn * 6]; tmpptr[7] = r0[l + packn * 7]; tmpptr += 8; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl); vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 8; #endif } } for (; i + 3 < tiles; i += 4) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = 
bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr += 4; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 4; #endif } } for (; i + 1 < tiles; i += 2) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr += 2; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 2; #endif } } for (; i < tiles; i++) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { vfloat16m1_t _val = vle16_v_f16m1(r0, vl); vse16_v_f16m1(tmpptr, _val, vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const 
__fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; __fp16 val4 = *r0++; __fp16 val5 = *r0++; __fp16 val6 = *r0++; __fp16 val7 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl); _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl); _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl); _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl); vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl); vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl); vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl); output0_tm += packn * 8; } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; 
__fp16 val2 = *r0++; __fp16 val3 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); output0_tm += packn * 4; } for (; i + 1 < tiles; i += 2) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); output0_tm += packn * 2; } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum, vl); output0_tm += packn; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 
0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; vfloat16m1_t _bias0 = bias ? vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl); // NOTE c99 variable length array __fp16 tmp[6][8][packn]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * packn; const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn; const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2; const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3; const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4; const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5; const __fp16* output0_tm_6 = output0_tm_0 + tiles * packn * 6; const __fp16* output0_tm_7 = output0_tm_0 + tiles * packn * 7; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * packn; // TODO rvv optimize for (int m = 0; m < 8; m++) { vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl); vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl); vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl); vfloat16m1_t _out0tm3 = 
vle16_v_f16m1(output0_tm_3, vl); vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl); vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl); vfloat16m1_t _out0tm6 = vle16_v_f16m1(output0_tm_6, vl); vfloat16m1_t _out0tm7 = vle16_v_f16m1(output0_tm_7, vl); vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_out0tm5, _out0tm6, vl); vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_out0tm5, _out0tm6, vl); vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl); vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl); vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl); vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl); vfloat16m1_t _tmp5m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); output0_tm_0 += tiles * packn * 8; output0_tm_1 += tiles * packn * 8; output0_tm_2 += tiles * packn * 8; output0_tm_3 += tiles * packn * 8; output0_tm_4 += tiles * packn * 8; output0_tm_5 += tiles * packn * 8; output0_tm_6 += tiles * packn * 8; output0_tm_7 += tiles * packn * 8; } for (int m = 0; m < 6; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = 
vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl); vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl); vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_tmp05, _tmp06, vl); vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_tmp05, _tmp06, vl); vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl); vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl); vfloat16m1_t _out04 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl); vse16_v_f16m1(output0, _out00, vl); vse16_v_f16m1(output0 + packn * 2, _out02, vl); vse16_v_f16m1(output0 + packn * 4, _out04, vl); vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl); vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl); vfloat16m1_t _out05 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp07, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl); vse16_v_f16m1(output0 + packn, _out01, vl); vse16_v_f16m1(output0 + packn * 3, _out03, vl); vse16_v_f16m1(output0 + packn * 5, _out05, vl); output0 += outw * packn; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void 
conv3x3s1_winograd42_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
    // Pre-transform the 3x3 fp32 kernel into the Winograd F(4x4,3x3) domain
    // (6x6 per input/output channel pair), then interleave/pack it for the
    // packn-lane fp16 GEMM used by conv3x3s1_winograd42_packn_fp16sa_rvv.
    // packn = fp16 lanes per RVV register (vlenb bytes / 2 bytes per __fp16).
    const int packn = csrr_vlenb() / 2;

    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix of F(4x4,3x3): maps a 3-tap kernel row/col to 6 transform taps.
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel: U = G * g * G^T, computed as two 1-D passes
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : vertical pass (G * g) -> 6x3
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : horizontal pass ((G*g) * G^T) -> 6x6, stored row-major
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    // Also converts fp32 -> fp16 here (element size 2u per value below).
    kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 36; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (__fp16)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) convolution, stride 1, packn-packed fp16 storage (RVV).
// kernel_tm must come from conv3x3s1_winograd42_transform_kernel_packn_fp16sa_rvv.
static void conv3x3s1_winograd42_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    // packn fp16 lanes per vector register (vlenb bytes / 2 per __fp16)
    const int packn = csrr_vlenb() / 2;
    // vl is fixed to packn lanes for the whole kernel
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2 : output rounded up to a multiple of 4, input needs 2 extra
    // border columns/rows so 6x6 input tiles (stride 4, overlap 2) fit exactly
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const __fp16* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // input transform B^T of F(4x4,3x3):
        // const float itm[6][6] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // NOTE c99 variable length array
            __fp16 tmp[6][6][packn];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * packn;

                    // vertical pass: apply B^T to each of the 6 tile rows
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                        vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                        vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                        vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                        vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);
                        vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl);

                        vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl);
                        vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r04, _r03, vl), -4.f, vfadd_vv_f16m1(_r01, _r02, vl), vl);
                        vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r03, vl), 4.f, vfsub_vv_f16m1(_r01, _r02, vl), vl);
                        vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), -2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl);
                        vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), 2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl);
                        vfloat16m1_t _tmp5m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl);

                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[1][m], _tmp1m, vl);
                        vse16_v_f16m1(tmp[2][m], _tmp2m, vl);
                        vse16_v_f16m1(tmp[3][m], _tmp3m, vl);
                        vse16_v_f16m1(tmp[4][m], _tmp4m, vl);
                        vse16_v_f16m1(tmp[5][m], _tmp5m, vl);

                        r0 += w * packn;
                    }

                    // destination: 36 transform coefficients are strided by
                    // (tiles * packn) within this channel
                    __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 6 + j) * packn;
                    __fp16* r0_tm_1 = r0_tm_0 + tiles * packn;
                    __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
                    __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
                    __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
                    __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5;

                    // horizontal pass: apply B^T across columns of tmp
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl);
                        vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl);
                        vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl);
                        vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl);
                        vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl);
                        vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl);

                        vfloat16m1_t _r0tm0 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl);
                        vfloat16m1_t _r0tm1 = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f16m1(_tmp01, _tmp02, vl), vl);
                        vfloat16m1_t _r0tm2 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f16m1(_tmp01, _tmp02, vl), vl);
                        vfloat16m1_t _r0tm3 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl);
                        vfloat16m1_t _r0tm4 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl);
                        vfloat16m1_t _r0tm5 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp05, 4.f, _tmp01, vl), -5.f, _tmp03, vl);

                        vse16_v_f16m1(r0_tm_0, _r0tm0, vl);
                        vse16_v_f16m1(r0_tm_1, _r0tm1, vl);
                        vse16_v_f16m1(r0_tm_2, _r0tm2, vl);
                        vse16_v_f16m1(r0_tm_3, _r0tm3, vl);
                        vse16_v_f16m1(r0_tm_4, _r0tm4, vl);
                        vse16_v_f16m1(r0_tm_5, _r0tm5, vl);

                        r0_tm_0 += tiles * packn * 6;
                        r0_tm_1 += tiles * packn * 6;
                        r0_tm_2 += tiles * packn * 6;
                        r0_tm_3 += tiles * packn * 6;
                        r0_tm_4 += tiles * packn * 6;
                        r0_tm_5 += tiles * packn * 6;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute: regroup tiles into batches of 8/4/2/1 so the GEMM below can
        // read tile values contiguously
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    // scalar transpose fallback for the 0.7 vector spec
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr[4] = r0[l + packn * 4];
                        tmpptr[5] = r0[l + packn
                                       * 5];
                        tmpptr[6] = r0[l + packn * 6];
                        tmpptr[7] = r0[l + packn * 7];
                        tmpptr += 8;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    // segment store interleaves the 8 tile vectors in one op
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr += 4;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr += 2;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator);

        // GEMM in the transform domain: for each of the 36 coefficients,
        // accumulate over inch*packn scalar inputs against packn-wide kernel
        // vectors; tiles are processed 8/4/2/1 at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        __fp16 val4 = *r0++;
                        __fp16 val5 = *r0++;
                        __fp16 val6 = *r0++;
                        __fp16 val7 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                        _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                        _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                        _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                        _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);
                    vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl);
                    vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl);
                    vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl);
                    vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl);

                    output0_tm += packn * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);

                    output0_tm += packn * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);

                    output0_tm += packn * 2;
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum, vl);

                    output0_tm += packn;
                }
            }
        }
    }
    bottom_blob_tm = Mat(); // already released above at "permute end"; harmless no-op here
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        // output transform A^T of F(4x4,3x3):
        // const float otm[4][6] = {
        //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 = (r01 - r02) + (r03 - r04) * 2
        // 2 = (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            // const float bias0 = bias ? bias[p] : 0.f;
            vfloat16m1_t _bias0 = bias ?
                                       vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);

            // NOTE variable length array
            __fp16 tmp[4][6][packn];

            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    // top_blob_tm.create(tiles, 36, outch, elemsize, elempack);

                    const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 6 + j) * packn;
                    const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn;
                    const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
                    const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
                    const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
                    const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5;

                    __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * packn;

                    // TODO rvv optimize
                    // vertical pass of A^T over the 6x6 transform tile
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl);
                        vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl);
                        vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl);
                        vfloat16m1_t _out0tm3 = vle16_v_f16m1(output0_tm_3, vl);
                        vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl);
                        vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl);

                        vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl);
                        vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl);
                        vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl);
                        vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl);

                        vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp02a, vl), _tmp02b, vl);
                        vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl);
                        vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl);
                        vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl);

                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[1][m], _tmp1m, vl);
                        vse16_v_f16m1(tmp[2][m], _tmp2m, vl);
                        vse16_v_f16m1(tmp[3][m], _tmp3m, vl);

                        output0_tm_0 += tiles * packn * 6;
                        output0_tm_1 += tiles * packn * 6;
                        output0_tm_2 += tiles * packn * 6;
                        output0_tm_3 += tiles * packn * 6;
                        output0_tm_4 += tiles * packn * 6;
                        output0_tm_5 += tiles * packn * 6;
                    }

                    // horizontal pass of A^T, bias added on the way out
                    for (int m = 0; m < 4; m++)
                    {
                        vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl);
                        vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl);
                        vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl);
                        vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl);
                        vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl);
                        vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl);

                        vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_tmp01, _tmp02, vl);
                        vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_tmp01, _tmp02, vl);
                        vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_tmp03, _tmp04, vl);
                        vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_tmp03, _tmp04, vl);

                        vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl);
                        vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl), vl);
                        vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl), vl);
                        vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl);

                        vse16_v_f16m1(output0, _out00, vl);
                        vse16_v_f16m1(output0 + packn, _out01, vl);
                        vse16_v_f16m1(output0 + packn * 2, _out02, vl);
                        vse16_v_f16m1(output0 + packn * 3, _out03, vl);

                        output0 += outw * packn;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
THTensorConv.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorConv.c"
#else

/* 2D Input, 2D kernel : convolve given image with the given kernel.
   Valid cross-correlation: output is ((ir-kr)/sr+1) x ((ic-kc)/sc+1);
   results are ACCUMULATED (+=) into r_, scaled by alpha.
   t_: input (ir x ic), k_: kernel (kr x kc), sr/sc: row/col stride.
   NOTE: `or` is used as an identifier; legal in C, but it is an operator
   alternative token in C++. */
void THTensor_(validXCorr2Dptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc)
{
  long or = (ir - kr) / sr + 1;
  long oc = (ic - kc) / sc + 1;

  long xx, yy, kx, ky;

  if ((sc != 1) || (oc < 4))  {
    /* regular convolution */
    for(yy = 0; yy < or; yy++) {
      for(xx = 0; xx < oc; xx++) {
        /* Dot product in two dimensions... (between input image and the mask) */
        real *pi_ = t_ + yy*sr*ic + xx*sc;
        real *pw_ = k_;
        real sum = 0;
        for(ky = 0; ky < kr; ky++) {
          for(kx = 0; kx < kc; kx++) {
            sum += pi_[kx]*pw_[kx];
          }
          pi_ += ic; /* next input line */
          pw_ += kc; /* next mask line */
        }
        /* Update output */
        *r_++ += alpha*sum;
      }
    }
  } else {
    /* vectorized path: one THVector_(cadd) per kernel tap over a whole
       output row (requires unit column stride and oc >= 4) */
    for(yy = 0; yy < or; yy++) {
      real *pi_ = t_ + yy*sr*ic;
      real *pw_ = k_;
      for (ky = 0; ky < kr; ky++) {
        real *pis_ = pi_;
        for (kx = 0; kx < kc; kx++) {
          THVector_(cadd)(r_, r_, pis_, alpha*pw_[kx], oc);
          pis_++;
        }
        pi_ += ic; /* next input line */
        pw_ += kc; /* next mask line */
      }
      r_ += oc;
    }
  }
}

/* 2D Input, 2D kernel : convolve given image with the given kernel.
   True convolution (valid): same geometry as validXCorr2Dptr but the kernel
   is read flipped, via pw_ starting at the last tap and pw_[-kx]. */
void THTensor_(validConv2Dptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc)
{
  long or = (ir - kr) / sr + 1;
  long oc = (ic - kc) / sc + 1;

  long xx, yy, kx, ky;

  if ((sc != 1) || (oc < 4))  {
    /* regular convolution */
    for(yy = 0; yy < or; yy++) {
      for(xx = 0; xx < oc; xx++) {
        /* Dot product in two dimensions... (between input image and the mask) */
        real *pi_ = t_ + yy*sr*ic + xx*sc;
        real *pw_ = k_ + kr*kc - 1; /* last kernel tap; walk backwards */
        real sum = 0;
        for(ky = 0; ky < kr; ky++) {
          for(kx = 0; kx < kc; kx++) {
            sum += pi_[kx]*pw_[-kx];
          }
          pi_ += ic; /* next input line */
          pw_ -= kc; /* next mask line */
        }
        /* Update output */
        *r_++ += alpha*sum;
      }
    }
  } else {
    /* vectorized path (unit column stride, oc >= 4) */
    for(yy = 0; yy < or; yy++) {
      real *pw_ = k_ + kr*kc - 1;
      real *pi_ = t_ + yy*sr*ic;
      for (ky = 0; ky < kr; ky++) {
        real *pis_ = pi_;
        for (kx = 0; kx < kc; kx++) {
          THVector_(cadd)(r_, r_, pis_, alpha*pw_[-kx], oc);
          pis_++;
        }
        pi_ += ic; /* next input line */
        pw_ -= kc; /* next mask line */
      }
      r_ += oc;
    }
  }
}

/* 2D Input, 2D kernel : convolve given image with the given kernel, full convolution.
   Output is (ir-1)*sr+kr x (ic-1)*sc+kc; each input value scatters an
   alpha-scaled copy of the (unflipped) kernel into r_. */
void THTensor_(fullConv2Dptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc)
{
  long oc = (ic - 1) * sc + kc;

  long xx, yy, kx, ky;

  if ((sc != 1) || (ic < 4))  {
    /* regular convolution */
    for(yy = 0; yy < ir; yy++) {
      for(xx = 0; xx < ic; xx++) {
        /* Outer product in two dimensions... (between input image and the mask) */
        real *po_ = r_ + yy*sr*oc + xx*sc;
        real *pw_ = k_;
        for(ky = 0; ky < kr; ky++) {
          real z = *t_ * alpha;
          for(kx = 0; kx < kc; kx++) {
            po_[kx] += z * pw_[kx];
          }
          po_ += oc; /* next input line */
          pw_ += kc; /* next mask line */
        }
        t_++;
      }
    }
  } else {
    /* vectorized path: scatter each kernel tap across a whole input row */
    for(yy = 0; yy < ir; yy++) {
      real *po_ = r_ + yy*sr*oc;
      real *pw_ = k_;
      for (ky = 0; ky < kr; ky++) {
        real *pos_ = po_;
        for (kx = 0; kx < kc; kx++) {
          THVector_(cadd)(pos_, pos_, t_, alpha*pw_[kx], ic);
          pos_++;
        }
        po_ += oc; /* next input line */
        pw_ += kc; /* next mask line */
      }
      t_ += ic;
    }
  }
}

/* 2D Input, 2D kernel : convolve given image with the given kernel, full convolution.
   Cross-correlation variant of fullConv2Dptr: kernel read flipped (pw_[-kx]). */
void THTensor_(fullXCorr2Dptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc)
{
  long oc = (ic - 1) * sc + kc;

  long xx, yy, kx, ky;

  if ((sc != 1) || (ic < 4))  {
    /* regular convolution */
    for(yy = 0; yy < ir; yy++) {
      for(xx = 0; xx < ic; xx++) {
        /* Outer product in two dimensions... (between input image and the mask) */
        real *po_ = r_ + yy*sr*oc + xx*sc;
        real *pw_ = k_ + kr*kc -1;
        long kx, ky; /* shadows the outer kx/ky declared above */
        for(ky = 0; ky < kr; ky++) {
          real z = *t_ * alpha;
          for(kx = 0; kx < kc; kx++) {
            po_[kx] += z * pw_[-kx];
          }
          po_ += oc; /* next input line */
          pw_ -= kc; /* next mask line */
        }
        t_++;
      }
    }
  } else {
    /* vectorized path (unit column stride, ic >= 4) */
    for(yy = 0; yy < ir; yy++) {
      real *po_ = r_ + yy*sr*oc;
      real *pw_ = k_ + kr*kc -1;
      for (ky = 0; ky < kr; ky++) {
        real *pos_ = po_;
        for (kx = 0; kx < kc; kx++) {
          THVector_(cadd)(pos_, pos_, t_, pw_[-kx]*alpha, ic);
          pos_++;
        }
        po_ += oc; /* next input line */
        pw_ -= kc; /* next mask line */
      }
      t_ += ic;
    }
  }
}

/* 2D Input, 2D kernel : convolve given image with the given kernel, valid convolution.
   for sr,sc=1 this is equivalent to validXCorr2Dptr, but otherwise it is
   useful for calculating derivatives wrt a kernel that is applied with
   stride sr,sc != 1.
   Here k_ is iterated as the outer loop (kr x kc) and the (or x oc) output
   accumulates z * input for every kernel tap. */
void THTensor_(validXCorr2DRevptr)(real *r_, real alpha, real *t_, long ir, long ic, real *k_, long kr, long kc, long sr, long sc)
{
  long or = ir - (kr - 1) * sr;
  long oc = ic - (kc - 1) * sc;

  long xx, yy, kx, ky;

  if ((sc != 1) || (kc < 4))  {
    /* regular convolution */
    for(yy = 0; yy < kr; yy++) {
      for(xx = 0; xx < kc; xx++) {
        real *po_ = r_;
        real *pi_ = t_ + yy*sr*ic + xx*sc;
        real z = *k_++ * alpha;

        for(ky = 0; ky < or; ky++) {
          for(kx = 0; kx < oc; kx++)
            po_[kx] += z * pi_[kx];
          pi_ += ic;
          po_ += oc;
        }
      }
    }
  } else {
    /* vectorized path: one THVector_(cadd) per output row */
    for(yy = 0; yy < kr; yy++) {
      for(xx = 0; xx < kc; xx++) {
        real *po_ = r_;
        real *pi_ = t_ + yy*sr*ic + xx*sc;
        real z = *k_++ * alpha;

        for(ky = 0; ky < or; ky++) {
          THVector_(cadd)(po_, po_, pi_, z, oc);
          pi_ += ic;
          po_ += oc;
        }
      }
    }
  }
}

/* 3D Input, 3D kernel : convolve given volume with the given kernel.
   Valid cross-correlation over (it x ir x ic) input with (kt x kr x kc)
   kernel, strides st/sr/sc; output accumulated (+=) scaled by alpha. */
void THTensor_(validXCorr3Dptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc)
{
  long ot = (it - kt) / st + 1;
  long or = (ir - kr) / sr + 1;
  long oc = (ic - kc) / sc + 1;

  long zz, xx, yy;

  for (zz = 0; zz < ot; zz++) {
    for(yy = 0; yy < or; yy++) {
      for(xx = 0; xx < oc; xx++) {
        /* Dot product in two dimensions... (between input image and the mask) */
        real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
        real *pw_ = k_;
        real sum = 0;
        long kz, kx, ky;
        for(kz = 0; kz < kt; kz++) {
          for(ky = 0; ky < kr; ky++) {
            for(kx = 0; kx < kc; kx++) {
              sum += pi_[kx]*pw_[kx];
            }
            pi_ += ic; /* next input line */
            pw_ += kc; /* next mask line */
          }
          pi_ += (ir-kr)*ic; /* next input slice */
        }
        /* Update output */
        *r_++ += sum*alpha;
      }
    }
  }
}

/* 3D Input, 3D kernel : convolve given volume with the given kernel.
   True convolution (valid): kernel read flipped via pw_[-kx] from the end. */
void THTensor_(validConv3Dptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc)
{
  long ot = (it - kt) / st + 1;
  long or = (ir - kr) / sr + 1;
  long oc = (ic - kc) / sc + 1;

  long zz, xx, yy;

  for(zz = 0; zz < ot; zz++) {
    for(yy = 0; yy < or; yy++) {
      for(xx = 0; xx < oc; xx++) {
        /* Dot product in two dimensions... (between input image and the mask) */
        real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
        real *pw_ = k_ + kt*kr*kc - 1;
        real sum = 0;
        long kz, kx, ky;
        for(kz = 0; kz < kt; kz++) {
          for(ky = 0; ky < kr; ky++) {
            for(kx = 0; kx < kc; kx++) {
              sum += pi_[kx]*pw_[-kx];
            }
            pi_ += ic; /* next input line */
            pw_ -= kc; /* next mask line */
          }
          pi_ += (ir-kr)*ic; /* next input slice */
        }
        /* Update output */
        *r_++ += alpha*sum;
      }
    }
  }
}

/* 3D Input, 3D kernel : convolve given volume with the given kernel, full convolution.
   Output is ((it-1)*st+kt) x ((ir-1)*sr+kr) x ((ic-1)*sc+kc); each input
   value scatters the (unflipped) kernel, scaled by alpha. */
void THTensor_(fullConv3Dptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc)
{
  long or = (ir - 1) * sr + kr;
  long oc = (ic - 1) * sc + kc;

  long zz, xx, yy;

  for(zz = 0; zz < it; zz++) {
    for(yy = 0; yy < ir; yy++) {
      for(xx = 0; xx < ic; xx++) {
        /* Outer product in two dimensions... (between input image and the mask) */
        real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc;
        real *pw_ = k_;
        long kz, kx, ky;
        for(kz = 0; kz < kt; kz++) {
          for(ky = 0; ky < kr; ky++) {
            real z = *t_ * alpha;
            for(kx = 0; kx < kc; kx++) {
              po_[kx] += z * pw_[kx];
            }
            po_ += oc; /* next input line */
            pw_ += kc; /* next mask line */
          }
          po_ += (or-kr)*oc; /* next output slice */
        }
        t_++;
      }
    }
  }
}

/* 3D Input, 3D kernel : convolve given volume with the given kernel, full convolution.
   Cross-correlation variant of fullConv3Dptr: kernel read flipped (pw_[-kx]). */
void THTensor_(fullXCorr3Dptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc)
{
  long or = (ir - 1) * sr + kr;
  long oc = (ic - 1) * sc + kc;

  long zz, xx, yy;

  for(zz = 0; zz < it; zz++) {
    for(yy = 0; yy < ir; yy++) {
      for(xx = 0; xx < ic; xx++) {
        /* Outer product in two dimensions... (between input image and the mask) */
        real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc;
        real *pw_ = k_ + kt*kr*kc -1;
        long kz, kx, ky;
        for(kz = 0; kz < kt; kz++) {
          for(ky = 0; ky < kr; ky++) {
            real z = *t_ * alpha;
            for(kx = 0; kx < kc; kx++) {
              po_[kx] += z * pw_[-kx];
            }
            po_ += oc; /* next input line */
            pw_ -= kc; /* next mask line */
          }
          po_ += (or-kr)*oc; /* next output slice */
        }
        t_++;
      }
    }
  }
}

/* 3D Input, 3D kernel : convolve given image with the given kernel, valid convolution.
   for sr,sc=1 this is equivalent to validXCorr3Dptr, but otherwise it is
   useful for calculating derivatives wrt a kernel that is applied with
   stride sr,sc != 1 */
void THTensor_(validXCorr3DRevptr)(real *r_, real alpha, real *t_, long it, long ir, long ic, real *k_, long kt, long kr, long kc, long st, long sr, long sc)
{
  long ot = it - (kt - 1) * st;
  long or = ir - (kr - 1) * sr;
  long oc = ic - (kc - 1) * sc;

  long zz, xx, yy;

  for(zz = 0; zz < kt; zz++) {
    for(yy = 0; yy < kr; yy++) {
      for(xx = 0; xx < kc; xx++) {
        real *po_ = r_;
        real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
        real z = *k_++ * alpha;
        long kz, kx, ky;
        for(kz = 0; kz < ot; kz++) {
          for(ky = 0; ky < or; ky++) {
            for(kx = 0; kx < oc; kx++)
              po_[kx] += z * pi_[kx];
            pi_ += ic;
            po_ += oc;
          }
          pi_ += (ir-or)*ic; /* next input slice */
        }
      }
    }
  }
}

void THTensor_(conv2d)(real* output_data, real alpha, real* ptr_input, long nInputRows, long nInputCols, real* ptr_weight, long nKernelRows, long nKernelCols, long srow, long scol, const char *vf, const char *xc)
{
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of 
convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } void THTensor_(conv3d)(real* output_data, real alpha, real* ptr_input, long nInputDepth, long nInputRows, long nInputCols, real* ptr_weight, long nKernelDepth, long nKernelRows, long nKernelCols, long sdepth, long srow, long scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(fullConv3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else if (*xc == 'X') THTensor_(validXCorr3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(validConv3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); } long THTensor_(convsize)(long x, long k, long s, const char* vf) { THArgCheck(*vf == 'V' || *vf == 'F', 1, "type of convolution can be 'V' or 'F'"); if (*vf == 'V') return (x-k)/s + 1; else return (x-1)*s + k; } 
/* 3D input, 3D kernel, 4D output
   like rank1 update
   A <- xx' + beta*A
   for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */
void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");

  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  nInputPlane = input->size[0];
  istride0    = input->stride[0];
  nInputRows  = input->size[1];
  nInputCols  = input->size[2];

  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];

  /* fixed typo: message previously read "covn2DRevger" */
  THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevger : Input image is smaller than kernel");

  nOutputRows = nInputRows - (nKernelRows - 1) * srow;
  nOutputCols = nInputCols - (nKernelCols - 1) * scol;

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }

#pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;

    for(i = 0; i < nInputPlane; i++)
    {
      /* get output */
      real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
      /* get input */
      real *ptr_input = input_data+i*istride0;

      /* do image, kernel convolution */
      THTensor_(validXCorr2DRevptr)(ptr_output,
                                    alpha,
                                    ptr_input,  nInputRows,  nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols,
                                    srow, scol);
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows; */
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 4D input (batched), 4D kernel (batched), 4D output
   like rank1 update
   A <- xx' + beta*A
   for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */
void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol)
{
  long nbatch, nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputRows, nOutputCols;
  long istride0, kstride0, istride1, kstride1;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");

  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  istride0    = input->stride[0];
  istride1    = input->stride[1];
  nbatch      = input->size[0];
  nInputPlane = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0 = kernel->stride[0];
  kstride1 = kernel->stride[1];
  nKernelPlane = kernel->size[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];

  /* fixed: messages previously named conv2DRevger */
  THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevgerm : Input image is smaller than kernel");
  THArgCheck(kernel->size[0] == input->size[0] , 2, "conv2DRevgerm : Input batch and kernel batch is not same size");

  nOutputRows = nInputRows - (nKernelRows - 1) * srow;
  nOutputCols = nInputCols - (nKernelCols - 1) * scol;

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }

#pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    for(i = 0; i < nInputPlane; i++)
    {
      long p;
      /* accumulate over the batch into the same (k,i) output plane */
      for(p = 0; p < nbatch; p++)
      {
        /* get kernel */
        real *ptr_weight = weight_data + p*kstride0 + k*kstride1;
        /* get output */
        real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
        /* get input */
        real *ptr_input = input_data + p*istride0 + i*istride1;

        /* do image, kernel convolution */
        THTensor_(validXCorr2DRevptr)(ptr_output,
                                      alpha,
                                      ptr_input,  nInputRows,  nInputCols,
                                      ptr_weight, nKernelRows, nKernelCols,
                                      srow, scol);
        /* Next output plane */
        /* output_data += nOutputCols*nOutputRows; */
      }
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 3D input, 3D kernel, 4D output
   like rank1 update
   A <- xx' + beta*A */
void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");

  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  nInputPlane = input->size[0];
  istride0    = input->stride[0];
  nInputRows  = input->size[1];
  nInputCols  = input->size[2];

  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];

  THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dger : Input image is smaller than kernel");

  if (*vf == 'F') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { /* valid */
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }

#pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;

    for(i = 0; i < nInputPlane; i++)
    {
      /* get output */
      real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
      /* get input */
      real *ptr_input = input_data+i*istride0;

      /* do image, kernel convolution */
      if (*vf == 'F')
        if (*xc == 'X')
          THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols, srow, scol);
        else
          THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                   ptr_weight, nKernelRows, nKernelCols, srow, scol);
      else
        if (*xc == 'X')
          THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                     ptr_weight, nKernelRows, nKernelCols, srow, scol);
        else
          THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols, srow, scol);
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows; */
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 3D input, 4D kernel, 3D output
   matrix vector product like
   y <- Ax + beta*y */
void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0, kstride1;
  THTensor *input;
  THTensor* kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");

  input = THTensor_(newContiguous)(t_);
  /* only copy the kernel if its innermost planes are not contiguous */
  if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) {
    kernel = THTensor_(newContiguous)(k_);
  } else {
    THTensor_(retain)(k_);
    kernel = k_;
  }

  nInputPlane = input->size[0];
  istride0    = input->stride[0];
  nInputRows  = input->size[1];
  nInputCols  = input->size[2];

  kstride0    = kernel->stride[0];
  kstride1    = kernel->stride[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];
  nOutputPlane = kernel->size[0];
  THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");

  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel");

  if (*vf == 'F') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { /* valid */
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }

#pragma omp parallel for private(k)
  for(k = 0; k < nOutputPlane; k++)
  {
    long i;
    /* get output */
    real *ptr_output = output_data + k*nOutputCols*nOutputRows;
    for(i = 0; i < nInputPlane; i++)
    {
      /* get kernel */
      real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
      /* get input */
      real *ptr_input = input_data + i*istride0;

      /* do image, kernel convolution */
      if (*vf == 'F')
        if (*xc == 'X')
          THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols, srow, scol);
        else
          THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                   ptr_weight, nKernelRows, nKernelCols, srow, scol);
      else
        if (*xc == 'X')
          THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                     ptr_weight, nKernelRows, nKernelCols, srow, scol);
        else
          THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols, srow, scol);
    }
    /* Next output plane */
    /* output_data += nOutputCols*nOutputRows;*/
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 4D input, 4D kernel, 4D output (batched)
   matrix matrix product like
   y <- Ax + beta*y */
void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long kstride0, kstride1;
  THTensor *input;
  THTensor* kernel;
  long nbatch;
  ptrdiff_t nelem;
  real *input_data;
  real *weight_data;
  real *output_data;
  long p;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");

  input = THTensor_(newContiguous)(t_);
  /* only copy the kernel if its innermost planes are not contiguous */
  if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) {
    kernel = THTensor_(newContiguous)(k_);
  } else {
    THTensor_(retain)(k_);
    kernel = k_;
  }

  nbatch      = input->size[0];
  nInputPlane = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0    = kernel->stride[0];
  kstride1    = kernel->stride[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];
  nOutputPlane = kernel->size[0];
  THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");

  /* fixed: message previously named conv2Dmv */
  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmm : Input image is smaller than kernel");

  if (*vf == 'F') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { /* valid */
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /*THTensor_(zero)(r_);*/
#pragma omp parallel for private(p)
    for (p=0; p < r_->size[0]; p++)
    {
      long k;
      for (k = 0; k < r_->size[1]; k++)
      {
        real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
        long l;
        for (l = 0; l < nOutputRows*nOutputCols; l++)
          ptr_output[l] = 0.0;
      }
    }
  }
  else if (beta != 1)
  {
    /*THTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(p)
    for(p=0; p < r_->size[0]; p++)
    {
      long k;
      for (k = 0; k < r_->size[1]; k++)
      {
        real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
        long l;
        for (l = 0; l < nOutputRows*nOutputCols; l++)
          ptr_output[l] *= beta;
      }
    }
  }

#pragma omp parallel for private(p)
  for(p=0; p < nbatch; p++)
  {
    long k;
    for(k = 0; k < nOutputPlane; k++)
    {
      long i;
      /* get output */
      real *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows;
      for(i = 0; i < nInputPlane; i++)
      {
        /* get kernel */
        real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
        /* get input */
        real *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols;

        /* do image, kernel convolution */
        if (*vf == 'F')
          if (*xc == 'X')
            THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                      ptr_weight, nKernelRows, nKernelCols, srow, scol);
          else
            THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                     ptr_weight, nKernelRows, nKernelCols, srow, scol);
        else
          if (*xc == 'X')
            THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                       ptr_weight, nKernelRows, nKernelCols, srow, scol);
          else
            THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols,
                                      ptr_weight, nKernelRows, nKernelCols, srow, scol);
      }
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows;*/
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 2D input, 2D kernel, 2D output
   scalar multiplication like
   y <- x*y + beta*y */
void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  THTensor *input;
  THTensor* kernel;
  long nInputRows;
  long nInputCols;
  long nKernelRows;
  long nKernelCols;
  long nOutputRows, nOutputCols;
  real *ptr_input;
  real *ptr_weight;
  real *output_data;
  ptrdiff_t nelem;

  THArgCheck(t_->nDimension == 2 , 3, "input: 2D Tensor expected");
  THArgCheck(k_->nDimension == 2 , 4, "kernel: 2D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");

  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  nInputRows  = input->size[0];
  nInputCols  = input->size[1];
  nKernelRows = kernel->size[0];
  nKernelCols = kernel->size[1];

  THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmul : Input image is smaller than kernel");

  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize2d)(r_, nOutputRows, nOutputCols);
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
    THTensor_(zero)(r_);
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  ptr_input = THTensor_(data)(input);
  ptr_weight = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  /* do image, kernel convolution */
  THTensor_(conv2d)(output_data,
                    alpha,
                    ptr_input, nInputRows, nInputCols,
                    ptr_weight, nKernelRows, nKernelCols,
                    srow, scol, vf, xc);
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 3D input, 3D kernel, 3D output
   component wise multiplication like
   y <- y.*x + beta*y */
void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");

  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  istride0    = input->stride[0];
  nInputPlane = input->size[0];
  nInputRows  = input->size[1];
  nInputCols  = input->size[2];

  kstride0    = kernel->stride[0];
  nOutputPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];

  THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dcmul : Input image is smaller than kernel");

  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  for(k = 0; k < nOutputPlane; k++)
  {
    /* get kernel */
    real *ptr_weight = weight_data + k*kstride0;
    /* get input */
    real *ptr_input = input_data + k*istride0;

    /* do image, kernel convolution */
    THTensor_(conv2d)(output_data,
                      alpha,
                      ptr_input, nInputRows, nInputCols,
                      ptr_weight, nKernelRows, nKernelCols,
                      srow, scol, vf, xc);
    /* Next output plane */
    output_data += nOutputCols*nOutputRows;
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 3D input, 3D kernel, 3D output
   component wise multiplication like with a permutation map
   y <- y.*x + beta*y */
void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor* kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  long nmaps;
  ptrdiff_t nelem;
  long k;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");

  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  istride0    = input->stride[0];
  nInputPlane = input->size[0];
  nInputRows  = input->size[1];
  nInputCols  = input->size[2];

  kstride0    = kernel->stride[0];
  nOutputPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];

  THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmap : Input image is smaller than kernel");

  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  nmaps = map->size[0];

  for(k = 0; k < nmaps; k++)
  {
    /* get indices (map entries are 1-based plane indices) */
    long from = (long)THTensor_(get2d)(map,k,0)-1;
    long to   = (long)THTensor_(get2d)(map,k,1)-1;

    /* get kernel */
    real *ptr_weight = weight_data + k*kstride0;
    /* get input */
    real *ptr_input = input_data + from*istride0;
    /* get output */
    real *ptr_output = output_data + to*nOutputRows*nOutputCols;

    /* do image, kernel convolution */
    THTensor_(conv2d)(ptr_output,
                      alpha,
                      ptr_input, nInputRows, nInputCols,
                      ptr_weight, nKernelRows, nKernelCols,
                      srow, scol, vf, xc);
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 4D input, 4D kernel, 5D output
   like rank1 update
   A <- xx' + beta*A
   for sr,sc=1 this is equivalent to xcorr2Dger, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */
void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol)
{
  long nInputPlane, nInputDepth, nInputRows, nInputCols;
  long nKernelPlane, nKernelDepth, nKernelRows, nKernelCols;
  long nOutputDepth, nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k, i;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");

  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  nInputPlane = input->size[0];
  istride0    = input->stride[0];
  nInputDepth = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelDepth = kernel->size[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];

  THArgCheck(nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv3DRevger : Input image is smaller than kernel");

  nOutputDepth = nInputDepth - (nKernelDepth - 1) * sdepth;
  nOutputRows = nInputRows - (nKernelRows - 1) * srow;
  nOutputCols = nInputCols - (nKernelCols - 1) * scol;

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  /* sequential: output_data advances plane by plane */
  for(k = 0; k < nKernelPlane; k++)
  {
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;

    for(i = 0; i < nInputPlane; i++)
    {
      /* get input */
      real *ptr_input = input_data+i*istride0;

      /* do image, kernel convolution */
      THTensor_(validXCorr3DRevptr)(output_data,
                                    alpha,
                                    ptr_input,  nInputDepth, nInputRows,  nInputCols,
                                    ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                                    sdepth, srow, scol);
      /* Next output plane */
      output_data += nOutputDepth*nOutputCols*nOutputRows;
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 4D input, 4D kernel, 5D output
   like rank1 update
   A <- xx' + beta*A */
void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputDepth, nInputRows, nInputCols;
  long nKernelPlane, nKernelDepth, nKernelRows, nKernelCols;
  long nOutputDepth, nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k, i;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can be 'X' or 'C'");

  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  nInputPlane = input->size[0];
  istride0    = input->stride[0];
  nInputDepth = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelDepth = kernel->size[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];

  THArgCheck((nInputDepth >= nKernelDepth
              && nInputRows >= nKernelRows
              && nInputCols >= nKernelCols)
             || *vf == 'F', 2, "conv3Dger : Input image is smaller than kernel");

  nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  /* sequential: output_data advances plane by plane */
  for(k = 0; k < nKernelPlane; k++)
  {
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;

    for(i = 0; i < nInputPlane; i++)
    {
      /* get input */
      real *ptr_input = input_data+i*istride0;

      /* do image, kernel convolution */
      THTensor_(conv3d)(output_data,
                        alpha,
                        ptr_input, nInputDepth, nInputRows, nInputCols,
                        ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                        sdepth, srow, scol, vf, xc);
      /* Next output plane */
      output_data += nOutputDepth*nOutputCols*nOutputRows;
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}

/* 4D input, 5D kernel, 4D output
   matrix vector product like
   y <- Ax + beta*y */
void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputDepth, nInputRows, nInputCols;
  long nKernelDepth, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
  long istride0, kstride0, kstride1;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k, i;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 5 , 4, "kernel: 5D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can be 'X' or 'C'");

  input = THTensor_(newContiguous)(t_);
  /* only copy the kernel if its innermost planes are not contiguous */
  if (!(k_->stride[4] == 1) || !(k_->stride[3] == k_->size[4])) {
    kernel = THTensor_(newContiguous)(k_);
  } else {
    THTensor_(retain)(k_);
    kernel = k_;
  }

  nInputPlane = input->size[0];
  istride0    = input->stride[0];
  nInputDepth = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0    = kernel->stride[0];
  kstride1    = kernel->stride[1];
  nKernelDepth = kernel->size[2];
  nKernelRows = kernel->size[3];
  nKernelCols = kernel->size[4];
  nOutputPlane = kernel->size[0];
  THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");

  THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmv : Input image is smaller than kernel");

  nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  for(k = 0; k < nOutputPlane; k++)
  {
    for(i = 0; i < nInputPlane; i++)
    {
      /* get kernel */
      real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
      /* get input */
      real *ptr_input = input_data + i*istride0;

      /* do image, kernel convolution */
      THTensor_(conv3d)(output_data, alpha, ptr_input,
nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 3D output scalar multiplication like y <- x*y + beta*y */ void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc) { THTensor *input; THTensor* kernel; long nInputDepth; long nInputRows; long nInputCols; long nKernelDepth; long nKernelRows; long nKernelCols; long nOutputDepth, nOutputRows, nOutputCols; real *ptr_input; real *ptr_weight; real *output_data; ptrdiff_t nelem; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); nInputDepth = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; nKernelDepth = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || 
beta == 0 || nelem != THTensor_(nElement)(r_)) THTensor_(zero)(r_); else if (beta != 1) THTensor_(mul)(r_, r_, beta); ptr_input = THTensor_(data)(input); ptr_weight = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 4D output component wise multiplication like y <- y.*x + beta*y */ void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputDepth, nInputRows, nInputCols; long nKernelDepth, nKernelRows, nKernelCols; long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; long istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; long k; THArgCheck(t_->nDimension == 4 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && 
nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dcmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); for(k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + k*istride0; /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 4D output component wise multiplication like with a permutation map y <- y.*x + beta*y */ void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, long sdepth, long srow, long scol, const char *vf, const char *xc) { long nInputPlane, nInputDepth, nInputRows, nInputCols; long nKernelDepth, nKernelRows, nKernelCols; long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; long istride0, kstride0; THTensor *input; THTensor *kernel; ptrdiff_t nelem; real *input_data; real *weight_data; real *output_data; long nmaps; long k; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride 
should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmap : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); nmaps = map->size[0]; for(k = 0; k < nmaps; k++) { /* get indices */ long from = (long)THTensor_(get2d)(map,k,0)-1; long to = (long)THTensor_(get2d)(map,k,1)-1; /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + from*istride0; /* get output */ real *ptr_output = output_data + to*nOutputDepth*nOutputRows*nOutputCols; /* do image, kernel convolution */ THTensor_(conv3d)(ptr_output, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, 
nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } THTensor_(free)(input); THTensor_(free)(kernel); } #endif
parallelEnvironment.h
/***************************************************************************** * * * Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 * * * * produced by * * * * Mark Bull, Jim Enright and Fiona Reid * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk * * * * * * Copyright 2012, The University of Edinburgh * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ /*-----------------------------------------------------------*/ /* Header file for parallelEnvironment.c. */ /* Contains variables declarations and function prototypes */ /* used to setup the MPI and OpenMP programming environment. 
*/
/*-----------------------------------------------------------*/

#ifndef PARALLELENVIRONMENT_H_
#define PARALLELENVIRONMENT_H_

#include "benchmarkSetup.h"
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* function prototypes */
/* NOTE(review): the brief purposes below are inferred from the names and
   the variables in this header; confirm against parallelEnvironment.c. */

/* presumably checks the MPI thread support level against 'required' */
int benchmarkSupport(int required);
/* set up / tear down the MPI + OpenMP environment */
int initParallelEnv();
int finaliseParallelEnv();
int findRank(int rankIn);
int findNeighbours();
/* compares processor names of two ranks -- presumably to detect
   whether they share a node */
int compareProcNames(int rankA, int rankB);
int setupCommunicators();
int procNameToHash();
int exchangeWorldRanks(int nodeA, int nodeB, int *otherWorldRank);
void sendProcName(int destNode, int srcNode, char *destProcName);
int crossCommBalance(int nodeA, int nodeB);

/* variable declaration */

/*MPI variables */
#define TAG 1 /* set tag to match messages */

/* NOTE(review): the globals below are tentative DEFINITIONS (no 'extern'),
   so every translation unit that includes this header defines them; this
   relies on the linker merging "common" symbols and fails with compilers
   that default to -fno-common (e.g. GCC >= 10).  Consider 'extern' here
   plus one definition in a .c file. */
int myMPIRank, numMPIprocs;
MPI_Comm comm, commCart;
MPI_Comm crossComm, localComm;
int localCommRank, localCommSize, crossCommRank;
MPI_Status status;
char myProcName[MPI_MAX_PROCESSOR_NAME];
int procNameLen;
MPI_Request requestID;
MPI_Status statusArray[4];   /* for haloexchange */
MPI_Request requestArray[4]; /* for haloexchange */
int leftNeighbour, rightNeighbour;
int sizeInteger;
int PPRanks[2]; /* ranks for pingpong or pingping */

/* OpenMP variables */
/* 'static' gives each including translation unit its own copy */
static int myThreadID, numThreads;
/* make myThreadID a thread private variable */
#pragma omp threadprivate(myThreadID)
/*Array to hold the global ID for each thread */
int *globalIDarray;
int threadSupport;

#endif /* PARALLELENVIRONMENT_H_ */
sse.h
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2017-2020 Evan Nemerson <evan@nemerson.com> * 2015-2017 John W.
Ratcliff <jratcliffscarab@gmail.com> * 2015 Brandon Rowlett <browlett@nvidia.com> * 2015 Ken Fast <kfast@gdeb.com> */ #if !defined(SIMDE_X86_SSE_H) #define SIMDE_X86_SSE_H #include "mmx.h" #if defined(_WIN32) #include <windows.h> #endif HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #endif SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else SIMDE_ALIGN_TO_16 int8_t i8[16]; SIMDE_ALIGN_TO_16 int16_t i16[8]; SIMDE_ALIGN_TO_16 int32_t i32[4]; SIMDE_ALIGN_TO_16 int64_t i64[2]; SIMDE_ALIGN_TO_16 uint8_t u8[16]; SIMDE_ALIGN_TO_16 uint16_t u16[8]; SIMDE_ALIGN_TO_16 uint32_t u32[4]; SIMDE_ALIGN_TO_16 uint64_t u64[2]; #if defined(SIMDE_HAVE_INT128_) SIMDE_ALIGN_TO_16 simde_int128 i128[1]; SIMDE_ALIGN_TO_16 simde_uint128 u128[1]; #endif SIMDE_ALIGN_TO_16 simde_float32 f32[4]; SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)]; SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; #endif SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2]; SIMDE_ALIGN_TO_16 simde__m64 m64[2]; #if defined(SIMDE_X86_SSE_NATIVE) SIMDE_ALIGN_TO_16 __m128 n;
/* simde__m128_private is a union of equally-sized typed views over one
   128-bit value: fixed-width lane arrays (or GCC-style vectors when
   SIMDE_VECTOR_SUBSCRIPT is available) plus, below, the native register
   type for whichever backend (SSE/NEON/WASM/AltiVec) was detected. */
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_ALIGN_TO_16 int8x16_t neon_i8; SIMDE_ALIGN_TO_16 int16x8_t neon_i16; SIMDE_ALIGN_TO_16 int32x4_t neon_i32; SIMDE_ALIGN_TO_16 int64x2_t neon_i64; SIMDE_ALIGN_TO_16 uint8x16_t neon_u8; SIMDE_ALIGN_TO_16 uint16x8_t neon_u16; SIMDE_ALIGN_TO_16 uint32x4_t neon_u32; SIMDE_ALIGN_TO_16 uint64x2_t neon_u64; SIMDE_ALIGN_TO_16 float32x4_t neon_f32; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_ALIGN_TO_16 float64x2_t neon_f64; #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) SIMDE_ALIGN_TO_16 v128_t wasm_v128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; #endif #endif } simde__m128_private; #if defined(SIMDE_X86_SSE_NATIVE) typedef __m128 simde__m128; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) typedef float32x4_t simde__m128; #elif defined(SIMDE_WASM_SIMD128_NATIVE) typedef v128_t simde__m128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128; #elif defined(SIMDE_VECTOR_SUBSCRIPT) typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else typedef simde__m128_private simde__m128; #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) typedef simde__m128 __m128; #endif HEDLEY_STATIC_ASSERT(16 ==
sizeof(simde__m128), "simde__m128 size incorrect"); HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect"); #if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF) HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned"); HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned"); #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde__m128_from_private(simde__m128_private v) { simde__m128 r; simde_memcpy(&r, &v, sizeof(r)); return r; } SIMDE_FUNCTION_ATTRIBUTES simde__m128_private simde__m128_to_private(simde__m128 v) { simde__m128_private r; simde_memcpy(&r, &v, sizeof(r)); return r; } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32) #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64) #endif #endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */ #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
/* The SIMDE_X86_GENERATE_CONVERSION_FUNCTION invocations above and below
   emit simde__m128_to_/from_<backend> helper pairs for each native lane
   type (NEON / AltiVec / WASM), built on the memcpy-based converters. */
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32) #if defined(SIMDE_BUG_GCC_95782) SIMDE_FUNCTION_ATTRIBUTES SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128_to_altivec_f32(simde__m128 value) { simde__m128_private r_ = simde__m128_to_private(value); return r_.altivec_f32; } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) { simde__m128_private r_; r_.altivec_f32 = value; return simde__m128_from_private(r_); } #else SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32) #endif #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64) #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128); #endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */ enum { #if defined(SIMDE_X86_SSE_NATIVE) SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST, SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN, SIMDE_MM_ROUND_UP = _MM_ROUND_UP, SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO #else SIMDE_MM_ROUND_NEAREST = 0x0000, SIMDE_MM_ROUND_DOWN = 0x2000, SIMDE_MM_ROUND_UP = 0x4000, SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000 #endif }; #if defined(_MM_FROUND_TO_NEAREST_INT) # define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT # define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF # define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF # define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO # define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION # define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC # define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC #else # define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00 #
define SIMDE_MM_FROUND_TO_NEG_INF 0x01 # define SIMDE_MM_FROUND_TO_POS_INF 0x02 # define SIMDE_MM_FROUND_TO_ZERO 0x03 # define SIMDE_MM_FROUND_CUR_DIRECTION 0x04 # define SIMDE_MM_FROUND_RAISE_EXC 0x00 # define SIMDE_MM_FROUND_NO_EXC 0x08 #endif #define SIMDE_MM_FROUND_NINT \ (SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_FLOOR \ (SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_CEIL \ (SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_TRUNC \ (SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_RINT \ (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_NEARBYINT \ (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC) #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT) # define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT # define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF # define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF # define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO # define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION # define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC # define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT # define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR # define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL # define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC # define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT # define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT #endif SIMDE_FUNCTION_ATTRIBUTES unsigned int SIMDE_MM_GET_ROUNDING_MODE(void) { #if defined(SIMDE_X86_SSE_NATIVE) return _MM_GET_ROUNDING_MODE(); #elif defined(SIMDE_HAVE_FENV_H) unsigned int vfe_mode; switch (fegetround()) { #if defined(FE_TONEAREST) case FE_TONEAREST: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; #endif #if defined(FE_TOWARDZERO) case FE_TOWARDZERO: vfe_mode = SIMDE_MM_ROUND_DOWN; break; #endif #if defined(FE_UPWARD) case FE_UPWARD: vfe_mode = 
SIMDE_MM_ROUND_UP; break; #endif #if defined(FE_DOWNWARD) case FE_DOWNWARD: vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO; break; #endif default: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; } return vfe_mode; #else return SIMDE_MM_ROUND_NEAREST; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE() #endif SIMDE_FUNCTION_ATTRIBUTES void SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) { #if defined(SIMDE_X86_SSE_NATIVE) _MM_SET_ROUNDING_MODE(a); #elif defined(SIMDE_HAVE_FENV_H) int fe_mode = FE_TONEAREST; switch (a) { #if defined(FE_TONEAREST) case SIMDE_MM_ROUND_NEAREST: fe_mode = FE_TONEAREST; break; #endif #if defined(FE_TOWARDZERO) case SIMDE_MM_ROUND_TOWARD_ZERO: fe_mode = FE_TOWARDZERO; break; #endif #if defined(FE_DOWNWARD) case SIMDE_MM_ROUND_DOWN: fe_mode = FE_DOWNWARD; break; #endif #if defined(FE_UPWARD) case SIMDE_MM_ROUND_UP: fe_mode = FE_UPWARD; break; #endif default: return; } fesetround(fe_mode); #else (void) a; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a) #endif SIMDE_FUNCTION_ATTRIBUTES uint32_t simde_mm_getcsr (void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_getcsr(); #else return SIMDE_MM_GET_ROUNDING_MODE(); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_getcsr() simde_mm_getcsr() #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_setcsr (uint32_t a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_setcsr(a); #else SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a)); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_setcsr(a) simde_mm_setcsr(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding) SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) { simde__m128_private r_, a_ = simde__m128_to_private(a); (void) lax_rounding; /* For architectures which lack a current direction 
SIMD instruction. * * Note that NEON actually has a current rounding mode instruction, * but in ARMv8+ the rounding mode is ignored and nearest is always * used, so we treat ARMv7 as having a rounding mode but ARMv8 as * not. */ #if \ defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \ defined(SIMDE_ARM_NEON_A32V8) if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION) rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13; #endif switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) { case SIMDE_MM_FROUND_CUR_DIRECTION: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_f32 = vrndiq_f32(a_.neon_f32); #elif defined(simde_math_nearbyintf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_nearbyintf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEAREST_INT: #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndnq_f32(a_.neon_f32); #elif defined(simde_math_roundevenf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_roundevenf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEG_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndmq_f32(a_.neon_f32); #elif defined(simde_math_floorf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_floorf(a_.f32[i]); } #else
/* (continuation of simde_x_mm_round_ps: remaining TO_POS_INF / TO_ZERO
   rounding cases, then simde_mm_round_ps dispatch and simde_mm_set_ps.) */
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_POS_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndpq_f32(a_.neon_f32); #elif defined(simde_math_ceilf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_ceilf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_ZERO: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndq_f32(a_.neon_f32); #elif defined(simde_math_truncf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_truncf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; default: HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); } return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) #define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding)) #else #define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps(e3, e2, e1, e0); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 }; r_.neon_f32 = vld1q_f32(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3); #else r_.f32[0] = e0; r_.f32[1] = e1; r_.f32[2] = e2; r_.f32[3] =
e3; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps1 (simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps1(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) (void) a; return vec_splats(a); #else return simde_mm_set_ps(a, a, a, a); #endif } #define simde_mm_set1_ps(a) simde_mm_set_ps1(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ps1(a) simde_mm_set_ps1(a) # define _mm_set1_ps(a) simde_mm_set1_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_move_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_move_ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = { 16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3); #else r_.f32[0] = b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_move_ss(a, b) simde_mm_move_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ps(a, b); #else simde__m128_private r_, a_ =
simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 + b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] + b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_add_ps(a, b) simde_mm_add_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_add_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0); float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0); // the upper values in the result must be the remnants of <a>.
/* scalar-tail fallback for add_ss: only lane 0 gets a+b; lanes 1-3 are
   copied unchanged from a (see the portable branch just below). */
r_.neon_f32 = vaddq_f32(a_.neon_f32, value); #else r_.f32[0] = a_.f32[0] + b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_add_ss(a, b) simde_mm_add_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_and_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_and_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 & b_.i32; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i] & b_.i32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_and_ps(a, b) simde_mm_and_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_andnot_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_andnot_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32 & b_.i32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = ~(a_.i32[i]) & b_.i32[i]; } #endif return simde__m128_from_private(r_);
#endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_xor_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_xor_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32f = a_.i32f ^ b_.i32f; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i] ^ b_.u32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_or_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_or_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32f = a_.i32f | b_.i32f; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i] | b_.u32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_or_ps(a, b) simde_mm_or_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_not_ps(simde__m128 a) { #if 
defined(SIMDE_X86_AVX512VL_NATIVE) __m128i ai = _mm_castps_si128(a); return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55)); #elif defined(SIMDE_X86_SSE2_NATIVE) /* Note: we use ints instead of floats because we don't want cmpeq * to return false for (NaN, NaN) */ __m128i ai = _mm_castps_si128(a); return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vmvnq_s32(a_.neon_i32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_not(a_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = ~(a_.i32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) { /* This function is for when you want to blend two elements together * according to a mask. It is similar to _mm_blendv_ps, except that * it is undefined whether the blend is based on the highest bit in * each lane (like blendv) or just bitwise operations. This allows * us to implement the function efficiently everywhere. * * Basically, you promise that all the lanes in mask are either 0 or * ~0. 
*/ #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_blendv_ps(a, b, mask); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b), mask_ = simde__m128_to_private(mask); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_avg_pu16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) uint32_t wa SIMDE_VECTOR(16); uint32_t wb SIMDE_VECTOR(16); uint32_t wr SIMDE_VECTOR(16); SIMDE_CONVERT_VECTOR_(wa, a_.u16); SIMDE_CONVERT_VECTOR_(wb, b_.u16); wr = (wa + wb + 1) >> 1; SIMDE_CONVERT_VECTOR_(r_.u16, wr); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b) # define _m_pavgw(a, b) simde_mm_avg_pu16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES 
simde__m64 simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_avg_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) uint16_t wa SIMDE_VECTOR(16); uint16_t wb SIMDE_VECTOR(16); uint16_t wr SIMDE_VECTOR(16); SIMDE_CONVERT_VECTOR_(wa, a_.u8); SIMDE_CONVERT_VECTOR_(wb, b_.u8); wr = (wa + wb + 1) >> 1; SIMDE_CONVERT_VECTOR_(r_.u8, wr); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b) # define _m_pavgb(a, b) simde_mm_avg_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_abs_ps(simde__m128 a) { #if defined(SIMDE_X86_AVX512F_NATIVE) && \ (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0)) return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vabsq_f32(a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_abs(a_.altivec_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_fabsf(a_.f32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpeq_ps(a, b); #else simde__m128_private r_, a_ = 
simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpeq_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpge_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpge_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpgt_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpgt_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmple_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmple_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmple_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmple_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmplt_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmplt_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpneq_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION) /* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float)) is missing from XL C/C++ v16.1.1, though the documentation (table 89 on page 432 of the IBM XL C/C++ for Linux Compiler Reference, Version 16.1.1) shows that it should be present. Both GCC and clang support it. */ r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32)); r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? 
~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpneq_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmplt_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmplt_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmple_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmple_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmpgt_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # 
define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmpgt_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmpge_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmpge_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ps(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) /* Note: NEON does not have ordered compare builtin Need to compare a eq a and b eq b to check for NaN Do AND of results to get final */ uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32); r_.neon_u32 = vandq_u32(ceqaa, ceqbb); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); #elif defined(simde_math_isnanf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || 
simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpunord_ps(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32); r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32); #elif defined(simde_math_isnanf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? 
~UINT32_C(0) : UINT32_C(0); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpunord_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(simde_math_isnanf) r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comieq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comieq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0); #else return a_.f32[0] == b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comige_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comige_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = 
simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0); #else return a_.f32[0] >= b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comigt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comigt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0); #else return a_.f32[0] > b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comile_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comile_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0); #else return a_.f32[0] <= b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define 
_mm_comile_ss(a, b) simde_mm_comile_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comilt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comilt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0); #else return a_.f32[0] < b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comineq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comineq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0); #else return a_.f32[0] != b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) { simde__m128_private r_, dest_ = simde__m128_to_private(dest), src_ = simde__m128_to_private(src); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0))); r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32); #elif 
defined(SIMDE_WASM_SIMD128_NATIVE) const v128_t sign_pos = wasm_f32x4_splat(-0.0f); r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) #if !defined(HEDLEY_IBM_VERSION) r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32); #else r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32); #endif #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f)); r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos); #elif defined(SIMDE_IEEE754_STORAGE) (void) src_; (void) dest_; simde__m128 sign_pos = simde_mm_set1_ps(-0.0f); r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]); } #endif return simde__m128_from_private(r_); } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) { return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest); } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvt_pi2ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); simde__m64_private b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; #else r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvt_ps2pi (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else a_ = simde__m128_to_private(a); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvt_si2ss (simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_si2ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); r_.i32[1] = a_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_ss2si(a); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0); #else simde__m128_private a_ = 
simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi16_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { simde_float32 v = a_.i16[i]; r_.f32[i] = v; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi32_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); simde__m64_private b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; #else r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi32x2_ps(a, b); #else 
simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32); SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32); #else r_.f32[0] = (simde_float32) a_.i32[0]; r_.f32[1] = (simde_float32) a_.i32[1]; r_.f32[2] = (simde_float32) b_.i32[0]; r_.f32[3] = (simde_float32) b_.i32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi8_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8)))); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]); r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]); r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]); r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi16 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi16(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i])); } 
#endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi32 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi32(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi8 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi8(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471) /* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to * i16, combine with an all-zero vector of i16 (which will become the upper * half), narrow to i8. 
*/ float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)); float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)); float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min)); r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)) r_.i8[i] = INT8_MAX; else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)) r_.i8[i] = INT8_MIN; else r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i])); } /* Note: the upper half is undefined */ #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu16_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (simde_float32) a_.u16[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu8_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8)))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / 
sizeof(r_.f32[0])) ; i++) { r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtsi32_ss(a, b); #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtsi64_ss(a, b); #else return _mm_cvtsi64x_ss(a, b); #endif #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde_float32 simde_mm_cvtss_f32 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtss_f32(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vgetq_lane_f32(a_.neon_f32, 0); #else return a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtss_si32 (simde__m128 a) { 
return simde_mm_cvt_ss2si(a); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvtss_si64 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtss_si64(a); #else return _mm_cvtss_si64x(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0))); #else return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0])); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtt_ps2pi (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]); } #endif return simde__m64_from_private(r_); #endif } #define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a)) # define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtt_ss2si(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); #endif #endif } #define simde_mm_cvttss_si32(a) 
simde_mm_cvtt_ss2si((a)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a)) # define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvttss_si64 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER) #if defined(__PGI) return _mm_cvttss_si64x(a); #else return _mm_cvttss_si64(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(simde_math_isnanf) r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? 
UINT32_C(0) : ~UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip0 = vrecpeq_f32(b_.neon_f32); float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32)); r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 / b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] / b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_div_ps(a, b) simde_mm_div_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_div_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = a_.f32[0] / b_.f32[0]; 
SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_div_ss(a, b) simde_mm_div_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int16_t simde_mm_extract_pi16 (simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private a_ = simde__m64_to_private(a); return a_.i16[imm8]; } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) # if defined(SIMDE_BUG_CLANG_44589) # define simde_mm_extract_pi16(a, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \ HEDLEY_DIAGNOSTIC_POP \ ) # else # define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8)) # endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) # define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8) #endif #define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8)) # define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private r_, a_ = simde__m64_to_private(a); r_.i64[0] = a_.i64[0]; r_.i16[imm8] = i; return simde__m64_from_private(r_); } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # if defined(SIMDE_BUG_CLANG_44589) # define ssimde_mm_insert_pi16(a, i, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ (_mm_insert_pi16((a), (i), (imm8))) \ HEDLEY_DIAGNOSTIC_POP \ ) # else # define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, 
i, imm8) # endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) # define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8))) #endif #define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) # define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_ld(0, mem_addr); #else simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_)); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load1_ps (simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps1(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_dup_f32(mem_addr); #else r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr)); #endif return simde__m128_from_private(r_); #endif } #define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) # define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ss (simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ss(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) 
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0); #else r_.f32[0] = *mem_addr; r_.i32[1] = 0; r_.i32[2] = 0; r_.i32[3] = 0; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr))); #else simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr); r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #if HEDLEY_HAS_WARNING("-Wold-style-cast") #define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr))) #else #define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr)) #endif #endif /* The SSE documentation says that there are no alignment requirements for mem_addr. Unfortunately they used the __m64 type for the argument which is supposed to be 8-byte aligned, so some compilers (like clang with -Wcast-align) will generate a warning if you try to cast, say, a simde_float32* to a simde__m64* for this function. I think the choice of argument type is unfortunate, but I do think we need to stick to it here. 
If there is demand I can always add something like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */ SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vld1_f32( HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32)); #else simde__m64_private b_; simde_memcpy(&b_, mem_addr, sizeof(b_)); r_.i32[0] = b_.i32[0]; r_.i32[1] = b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #if HEDLEY_HAS_WARNING("-Wold-style-cast") #define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr))) #else #define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr)) #endif #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadr_ps(mem_addr); #else simde__m128_private r_, v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr)); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrev64q_f32(v_.neon_f32); r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) r_.altivec_f32 = vec_reve(v_.altivec_f32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0); #else r_.f32[0] = v_.f32[3]; r_.f32[1] = v_.f32[2]; r_.f32[2] = v_.f32[1]; r_.f32[3] = v_.f32[0]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadu_ps(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_load(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #else simde_memcpy(&r_, mem_addr, sizeof(r_)); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) _mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr)); #else simde__m64_private a_ = simde__m64_to_private(a), mask_ = simde__m64_to_private(mask); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) if (mask_.i8[i] < 0) mem_addr[i] = a_.i8[i]; #endif } #define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) # define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < 
(sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b) # define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS) r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? 
a_.f32[i] : b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_ps(a, b) simde_mm_max_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b) # define _m_pmaxub(a, b) simde_mm_max_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_max_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(maxq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? 
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_ss(a, b) simde_mm_max_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b) # define _m_pminsw(a, b) simde_mm_min_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ps(a, b); #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE) return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128); #else r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128)); #endif return simde__m128_from_private(r_); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32); #else r_.altivec_f32 = vec_sel(b_.altivec_f32, 
a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32)); #endif return simde__m128_from_private(r_); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) simde__m128 mask = simde_mm_cmplt_ps(a, b); return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_ps(a, b) simde_mm_min_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminub(a, b) simde_mm_min_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b) # define _m_pminub(a, b) simde_mm_min_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_min_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? 
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_ss(a, b) simde_mm_min_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movehl_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movehl_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a32 = vget_high_f32(a_.neon_f32); float32x2_t b32 = vget_high_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(b32, a32); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergel(b_.altivec_i64, a_.altivec_i64)); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3); #else r_.f32[0] = b_.f32[2]; r_.f32[1] = b_.f32[3]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movelh_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movelh_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a10 = vget_low_f32(a_.neon_f32); float32x2_t b10 = vget_low_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(a10, b10); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergeh(a_.altivec_i64, b_.altivec_i64)); #else r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif 
return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_movemask_pi8 (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_movemask_pi8(a); #else simde__m64_private a_ = simde__m64_to_private(a); int r = 0; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) uint8x8_t input = a_.neon_u8; const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0}; const uint8x8_t mask_and = vdup_n_u8(0x80); const int8x8_t mask_shift = vld1_s8(xr); const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift); uint8x8_t lo = mask_result; r = vaddv_u8(lo); #else const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]); SIMDE_VECTORIZE_REDUCTION(|:r) for (size_t i = 0 ; i < nmemb ; i++) { r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i); } #endif return r; #endif } #define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a) # define _m_pmovmskb(a) simde_mm_movemask_pi8(a) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_movemask_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_movemask_ps(a); #else int r = 0; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) static const int32_t shift_amount[] = { 0, 1, 2, 3 }; const int32x4_t shift = vld1q_s32(shift_amount); uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31); return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift))); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) // Shift out everything but the sign bits with a 32-bit unsigned shift right. uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31)); // Merge the two pairs together with a 64-bit unsigned shift right + add. 
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31)); // Extract the result. return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2); #else SIMDE_VECTORIZE_REDUCTION(|:r) for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) { r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i; } #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movemask_ps(a) simde_mm_movemask_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_mul_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_mul_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 * b_.f32; #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] * b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_mul_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_mul_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_mul_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.f32[0] = a_.f32[0] * b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_mulhi_pu16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16); const uint32x4_t t2 = vshrq_n_u32(t1, 16); const uint16x4_t t3 = vmovn_u32(t2); r_.neon_u16 = t3; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16))); } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b) # define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION) #define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0) #define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1) #define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2) #define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3) #define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4) #define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5) #define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6) #define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7) #else #define SIMDE_MM_HINT_NTA 0 #define SIMDE_MM_HINT_T0 1 #define SIMDE_MM_HINT_T1 2 #define SIMDE_MM_HINT_T2 3 #define SIMDE_MM_HINT_ENTA 4 #define SIMDE_MM_HINT_ET0 5 #define SIMDE_MM_HINT_ET1 6 #define SIMDE_MM_HINT_ET2 7 #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) HEDLEY_DIAGNOSTIC_PUSH #if HEDLEY_HAS_WARNING("-Wreserved-id-macro") _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #endif #undef _MM_HINT_NTA #define _MM_HINT_NTA SIMDE_MM_HINT_NTA #undef 
_MM_HINT_T0 #define _MM_HINT_T0 SIMDE_MM_HINT_T0 #undef _MM_HINT_T1 #define _MM_HINT_T1 SIMDE_MM_HINT_T1 #undef _MM_HINT_T2 #define _MM_HINT_T2 SIMDE_MM_HINT_T2 #undef _MM_HINT_ETNA #define _MM_HINT_ETNA SIMDE_MM_HINT_ETNA #undef _MM_HINT_ET0 #define _MM_HINT_ET0 SIMDE_MM_HINT_ET0 #undef _MM_HINT_ET1 #define _MM_HINT_ET1 SIMDE_MM_HINT_ET1 #undef _MM_HINT_ET1 #define _MM_HINT_ET2 SIMDE_MM_HINT_ET2 HEDLEY_DIAGNOSTIC_POP #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_prefetch (char const* p, int i) { #if defined(HEDLEY_GCC_VERSION) __builtin_prefetch(p); #else (void) p; #endif (void) i; } #if defined(SIMDE_X86_SSE_NATIVE) #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */ #define simde_mm_prefetch(p, i) \ (__extension__({ \ HEDLEY_DIAGNOSTIC_PUSH \ HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \ _mm_prefetch((p), (i)); \ HEDLEY_DIAGNOSTIC_POP \ })) #else #define simde_mm_prefetch(p, i) _mm_prefetch(p, i) #endif #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_prefetch(p, i) simde_mm_prefetch(p, i) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_negate_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vnegq_f32(a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_VECTOR_NEGATE) r_.f32 = -a_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = -a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 
simde_mm_rcp_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip = vrecpeq_f32(a_.neon_f32); #if SIMDE_ACCURACY_PREFERENCE > 0 for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) { recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32)); } #endif r_.neon_f32 = recip; #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_re(a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.f32 = 1.0f / a_.f32; #elif defined(SIMDE_IEEE754_STORAGE) /* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */ SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { int32_t ix; simde_float32 fx = a_.f32[i]; simde_memcpy(&ix, &fx, sizeof(ix)); int32_t x = INT32_C(0x7EF311C3) - ix; simde_float32 temp; simde_memcpy(&temp, &x, sizeof(temp)); r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx); } #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = 1.0f / a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rcp_ps(a) simde_mm_rcp_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rcp_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); r_.f32[0] = 1.0f / a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rcp_ss(a) simde_mm_rcp_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES 
simde__m128 simde_mm_rsqrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rsqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrsqrteq_f32(a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_rsqrte(a_.altivec_f32); #elif defined(SIMDE_IEEE754_STORAGE) /* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf Pages 100 - 103 */ SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { #if SIMDE_ACCURACY_PREFERENCE <= 0 r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1); #else simde_float32 x = a_.f32[i]; simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; int32_t ix; simde_memcpy(&ix, &x, sizeof(ix)); #if SIMDE_ACCURACY_PREFERENCE == 1 ix = INT32_C(0x5F375A82) - (ix >> 1); #else ix = INT32_C(0x5F37599E) - (ix >> 1); #endif simde_memcpy(&x, &ix, sizeof(x)); #if SIMDE_ACCURACY_PREFERENCE >= 2 x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); #endif x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); r_.f32[i] = x; #endif } #elif defined(simde_math_sqrtf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rsqrt_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rsqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_IEEE754_STORAGE) { #if SIMDE_ACCURACY_PREFERENCE <= 0 r_.i32[0] = 
INT32_C(0x5F37624F) - (a_.i32[0] >> 1); #else simde_float32 x = a_.f32[0]; simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; int32_t ix; simde_memcpy(&ix, &x, sizeof(ix)); #if SIMDE_ACCURACY_PREFERENCE == 1 ix = INT32_C(0x5F375A82) - (ix >> 1); #else ix = INT32_C(0x5F37599E) - (ix >> 1); #endif simde_memcpy(&x, &ix, sizeof(x)); #if SIMDE_ACCURACY_PREFERENCE >= 2 x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); #endif x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); r_.f32[0] = x; #endif } r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #elif defined(simde_math_sqrtf) r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_sad_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8)); uint16_t r0 = t[0] + t[1] + t[2] + t[3]; r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0); #else uint16_t sum = 0; #if defined(SIMDE_HAVE_STDLIB_H) SIMDE_VECTORIZE_REDUCTION(+:sum) for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i])); } r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum); r_.i16[1] = 0; r_.i16[2] = 0; r_.i16[3] = 0; #else HEDLEY_UNREACHABLE(); #endif #endif return simde__m64_from_private(r_); #endif } #define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b) # define _m_psadbw(a, b) simde_mm_sad_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES 
simde__m128 simde_mm_set_ss (simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ss(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0); #else return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ss(a) simde_mm_set_ss(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_setr_ps(e3, e2, e1, e0); #else return simde_mm_set_ps(e0, e1, e2, e3); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_setzero_ps (void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_setzero_ps(); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(SIMDE_FLOAT32_C(0.0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_splats(SIMDE_FLOAT32_C(0.0)); #else simde__m128 r; simde_memset(&r, 0, sizeof(r)); return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_setzero_ps() simde_mm_setzero_ps() #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_undefined_ps (void) { simde__m128_private r_; #if defined(SIMDE_HAVE_UNDEFINED128) r_.n = _mm_undefined_ps(); #elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) r_ = simde__m128_to_private(simde_mm_setzero_ps()); #endif return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_undefined_ps() simde_mm_undefined_ps() #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_POP #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_setone_ps (void) { simde__m128 t = 
simde_mm_setzero_ps(); return simde_mm_cmpeq_ps(t, t); } SIMDE_FUNCTION_ATTRIBUTES void simde_mm_sfence (void) { /* TODO: Use Hedley. */ #if defined(SIMDE_X86_SSE_NATIVE) _mm_sfence(); #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) __atomic_thread_fence(__ATOMIC_SEQ_CST); #elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9) __atomic_thread_fence(__ATOMIC_SEQ_CST); #else atomic_thread_fence(memory_order_seq_cst); #endif #elif defined(_MSC_VER) MemoryBarrier(); #elif HEDLEY_HAS_EXTENSION(c_atomic) __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) __sync_synchronize(); #elif defined(_OPENMP) #pragma omp critical(simde_mm_sfence_) { } #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sfence() simde_mm_sfence() #endif #define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w) #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8) #elif defined(SIMDE_SHUFFLE_VECTOR_) # define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \ const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \ simde__m64_from_private((simde__m64_private) { .i16 = \ SIMDE_SHUFFLE_VECTOR_(16, 8, \ (simde__tmp_a_).i16, \ (simde__tmp_a_).i16, \ (((imm8) ) & 3), \ (((imm8) >> 2) & 3), \ (((imm8) >> 4) & 3), \ (((imm8) >> 6) & 3)) }); })) #else SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_shuffle_pi16 (simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m64_private r_; simde__m64_private a_ = simde__m64_to_private(a); for (size_t i = 0 ; i < 
sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) { r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3]; } HEDLEY_DIAGNOSTIC_PUSH #if HEDLEY_HAS_WARNING("-Wconditional-uninitialized") # pragma clang diagnostic ignored "-Wconditional-uninitialized" #endif return simde__m64_from_private(r_); HEDLEY_DIAGNOSTIC_POP } #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8) #else # define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8) # define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8) #endif #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) # define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_shuffle_ps(a, b, imm8) \ __extension__({ \ float32x4_t ret; \ ret = vmovq_n_f32( \ vgetq_lane_f32(a, (imm8) & (0x3))); \ ret = vsetq_lane_f32( \ vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \ ret, 1); \ ret = vsetq_lane_f32( \ vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \ ret, 2); \ ret = vsetq_lane_f32( \ vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \ ret, 3); \ }) #elif defined(SIMDE_SHUFFLE_VECTOR_) # define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \ simde__m128_from_private((simde__m128_private) { .f32 = \ SIMDE_SHUFFLE_VECTOR_(32, 16, \ simde__m128_to_private(a).f32, \ simde__m128_to_private(b).f32, \ (((imm8) ) & 3), \ (((imm8) >> 2) & 3), \ (((imm8) >> 4) & 3) + 4, \ (((imm8) >> 6) & 3) + 4) }); })) #else SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.f32[0] = a_.f32[(imm8 >> 0) & 3]; r_.f32[1] = a_.f32[(imm8 >> 2) & 3]; r_.f32[2] = b_.f32[(imm8 >> 4) & 3]; r_.f32[3] = b_.f32[(imm8 >> 6) & 3]; return 
simde__m128_from_private(r_); } #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vsqrtq_f32(a_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t est = vrsqrteq_f32(a_.neon_f32); for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) { est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est); } r_.neon_f32 = vmulq_f32(a_.neon_f32, est); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_sqrt(a_.altivec_f32); #elif defined(simde_math_sqrt) SIMDE_VECTORIZE for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) { r_.f32[i] = simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_sqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #elif defined(simde_math_sqrtf) r_.f32[0] = simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a)) 
#endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(a_.altivec_f32, 0, mem_addr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) wasm_v128_store(mem_addr, a_.wasm_v128); #else simde_memcpy(mem_addr, &a_, sizeof(a)); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) { simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128); #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps1(mem_addr_, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_); #elif defined(SIMDE_SHUFFLE_VECTOR_) simde__m128_private tmp_; tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0); simde_mm_store_ps(mem_addr_, tmp_.f32); #else SIMDE_VECTORIZE_ALIGNED(mem_addr_:16) for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { mem_addr_[i] = a_.f32[0]; } #endif #endif } #define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) # define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, 
simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ss(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_lane_f32(mem_addr, a_.neon_f32, 0); #else *mem_addr = a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32)); #else simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1])); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr); simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) dest_->neon_f32 = vget_low_f32(a_.neon_f32); #else dest_->f32[0] = a_.f32[0]; dest_->f32[1] = a_.f32[1]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storer_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if 
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    /* AltiVec: reverse the lanes, then perform an aligned store. */
    vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* NEON: vrev64q reverses within each 64-bit half; vextq(…, 2) swaps halves,
       giving the fully reversed vector. */
    float32x4_t tmp = vrev64q_f32(a_.neon_f32);
    vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
    simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
  #else
    /* Portable fallback: element-by-element copy in reverse order. */
    SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
    for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
      mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
    }
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* Unaligned store of all four single-precision lanes (portable _mm_storeu_ps). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_storeu_ps(mem_addr, a);
#else
  simde__m128_private a_ = simde__m128_to_private(a);
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    vst1q_f32(mem_addr, a_.neon_f32);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    vec_vsx_st(a_.altivec_f32, 0, mem_addr);
  #else
    /* memcpy handles any alignment without invoking UB. */
    simde_memcpy(mem_addr, &a_, sizeof(a_));
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* Lane-wise single-precision subtraction (portable _mm_sub_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_sub_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.f32 = a_.f32 - b_.f32;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = a_.f32[i] - b_.f32[i];
    }
  #endif
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif

/* Subtract only lane 0; lanes 1..3 pass through from `a` (portable _mm_sub_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  r_.f32[0] = a_.f32[0] - b_.f32[0];
  r_.f32[1] = a_.f32[1];
  r_.f32[2] = a_.f32[2];
  r_.f32[3] = a_.f32[3];
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif

/* Unordered compare-equal on lane 0 (portable _mm_ucomieq_ss).
   The fenv save/restore suppresses the invalid-operation flag a NaN
   comparison would otherwise raise (ucomiss does not raise it for QNaN). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomieq_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* NaN-aware: result is true when either input is NaN OR lanes are equal
       ("unordered" semantics). */
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
    uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] == b_.f32[0];
    if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp);
  #else
    r = a_.f32[0] == b_.f32[0];
  #endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif

/* Unordered compare greater-or-equal on lane 0 (false when either is NaN). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomige_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
    uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] >= b_.f32[0];
    if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp);
  #else
    r = a_.f32[0] >= b_.f32[0];
  #endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif

/* Unordered compare greater-than on lane 0 (false when either is NaN). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomigt_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
    uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] > b_.f32[0];
    if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp);
  #else
    r = a_.f32[0] > b_.f32[0];
  #endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif

/* Unordered compare less-or-equal on lane 0 (true when either is NaN). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomile_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
    uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] <= b_.f32[0];
    if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp);
  #else
    r = a_.f32[0] <= b_.f32[0];
  #endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif

/* Unordered compare less-than on lane 0 (true when either is NaN). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomilt_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
    uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] < b_.f32[0];
    if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp);
  #else
    r = a_.f32[0] < b_.f32[0];
  #endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif

/* Unordered compare not-equal on lane 0 (false when either is NaN). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomineq_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
    uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
    r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] != b_.f32[0];
    if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp);
  #else
    r = a_.f32[0] != b_.f32[0];
  #endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif

/* Detect compiler support for __builtin_ia32_undef128 (used by _mm_undefined_ps
   implementations elsewhere in this header). */
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
#  if __has_builtin(__builtin_ia32_undef128)
#   define SIMDE_HAVE_UNDEFINED128
#  endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
#  define SIMDE_HAVE_UNDEFINED128
# endif
#endif

#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif

/* Interleave the high halves of a and b: {a2, b2, a3, b3} (portable _mm_unpackhi_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpackhi_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    float32x2_t a1 = vget_high_f32(a_.neon_f32);
    float32x2_t b1 = vget_high_f32(b_.neon_f32);
    float32x2x2_t result = vzip_f32(a1, b1);
    r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
  #else
    r_.f32[0] = a_.f32[2];
    r_.f32[1] = b_.f32[2];
    r_.f32[2] = a_.f32[3];
    r_.f32[3] = b_.f32[3];
  #endif
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif

/* Interleave the low halves of a and b: {a0, b0, a1, b1} (portable _mm_unpacklo_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpacklo_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    float32x2_t a1 = vget_low_f32(a_.neon_f32);
    float32x2_t b1 = vget_low_f32(b_.neon_f32);
    float32x2x2_t result = vzip_f32(a1, b1);
    r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
  #else
    r_.f32[0] = a_.f32[0];
    r_.f32[1] = b_.f32[0];
    r_.f32[2] = a_.f32[1];
    r_.f32[3] = b_.f32[1];
  #endif
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif

/* Non-temporal 64-bit store (portable _mm_stream_pi).  The portable path is a
   plain store; the non-temporal hint is performance-only, not semantic. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  _mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
  /* Note: `dest` is a pointer, `a_` is a value -- the `*` binds to the first
     declarator only. */
  simde__m64_private* dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
    a_ = simde__m64_to_private(a);
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
  #else
    dest->i64[0] = a_.i64[0];
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif

/* Non-temporal 128-bit store (portable _mm_stream_ps); falls back to a normal
   aligned store when no non-temporal builtin is available. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
  simde__m128_private a_ = simde__m128_to_private(a);
  __builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
  simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* 4x4 in-register transpose.  NEON version uses vtrnq + half recombination;
   the generic version is the classic unpack/movelh/movehl sequence. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
    float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
    row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
                        vget_low_f32(ROW23.val[0])); \
    row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
                        vget_low_f32(ROW23.val[1])); \
    row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
                        vget_high_f32(ROW23.val[0])); \
    row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
                        vget_high_f32(ROW23.val[1])); \
  } while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    simde__m128 tmp3, tmp2, tmp1, tmp0; \
    tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
    tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
    tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
    tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
    row0 = simde_mm_movelh_ps(tmp0, tmp2); \
    row1 = simde_mm_movehl_ps(tmp2, tmp0); \
    row2 = simde_mm_movelh_ps(tmp1, tmp3); \
    row3 = simde_mm_movehl_ps(tmp3, tmp1); \
  } while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif

/* MXCSR exception-flag bits: use the native macros when present, otherwise the
   architectural constant values. */
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
   (SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
    SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
    SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif

/* MXCSR exception-mask bits. */
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
   (SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
    SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
    SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif

/* MXCSR flush-to-zero control bits. */
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif

SIMDE_END_DECLS_

HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_X86_SSE_H) */
OMParrayMulti.c
// Array multiplication using OpenMP and MPI // Author: Dimitris Gravanis, 2017 /* work in progress */ #include <stdio.h> #include <stdlib.h> #include <omp.h> #include "mpi.h" int main(int argc, char *argv[]) { int *A, *B, *C; // array pointers int N; // array size int i; // iterator double start, stop; // timer start, stop /* MPI variables */ int id, P; // rank, size /* MPI Start */ MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Comm_size(MPI_COMM_WORLD, &P); if (id == 0) { printf("\nInsert array size N: "); scanf("%d", &N); } MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD); A = malloc(N * sizeof(int)); B = malloc(N * sizeof(int)); C = malloc(N * sizeof(int)); for (i = 0; i < N; i++) { A[i] = rand()%10; B[i] = rand()%10; } if (id == 0) { start = omp_get_wtime(); // timer start } #pragma omp parallel for private(i) for (i = 0; i < N; i++) { C[i] = A[i] * B[i]; } if (id == 0) { stop = omp_get_wtime(); // timer stop /* display results */ printf("\n\n"); printf("Input arrays"); printf("\nA:\n"); for (i = 0; i < N; i++) { printf("%4d", A[i]); if (i == N - 1) { printf("\n"); } } printf("\nB:\n"); for (i = 0; i < N; i++) { printf("%4d", B[i]); if (i == N - 1) { printf("\n"); } } printf("\n\n"); printf("Output array"); printf("\nC:\n"); for (i = 0; i < N; i++) { printf("%4d", C[i]); if (i == N - 1) { printf("\n"); } } printf("\n\n"); printf("Total run time: %.6fs", stop - start); printf("\n\n"); } MPI_Finalize(); return 0; }
6755.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp parallel for simd schedule(static, 2) num_threads(2) for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp parallel for simd schedule(static, 2) num_threads(2) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
geo_yeefdtd_rect.kernel_runtime.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include "local_header.h" #include "openmp_pscmc_inc.h" #include "geo_yeefdtd_rect.kernel_inc.h" int openmp_RECT_YEE_CURL_L_init (openmp_pscmc_env * pe ,openmp_RECT_YEE_CURL_L_struct * kerstr ){ return 0 ;} void openmp_RECT_YEE_CURL_L_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_RECT_YEE_CURL_L_struct )); } int openmp_RECT_YEE_CURL_L_get_num_compute_units (openmp_RECT_YEE_CURL_L_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_RECT_YEE_CURL_L_get_xlen (){ return IDX_OPT_MAX ;} int openmp_RECT_YEE_CURL_L_exec (openmp_RECT_YEE_CURL_L_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_RECT_YEE_CURL_L_scmc_kernel ( ( kerstr )->inoutE1 , ( kerstr )->inB0 , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_RECT_YEE_CURL_L_scmc_set_parameter_inoutE1 (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inoutE1 = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_inB0 (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inB0 = pm->d_data); } int 
openmp_RECT_YEE_CURL_L_scmc_set_parameter_xoffset (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_yoffset (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_zoffset (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_y_cpu_core (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_numvec (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_XLEN (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_YLEN (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_ZLEN (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_ovlp (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_xblock (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_yblock (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_zblock (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_num_ele (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( 
( kerstr )->num_ele = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_DT (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_DELTA_X (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_X = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_DELTA_Y (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Y = pm->d_data); } int openmp_RECT_YEE_CURL_L_scmc_set_parameter_DELTA_Z (openmp_RECT_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Z = pm->d_data); }
mttkrp_omp.c
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <HiParTI.h> #include "hicoo.h" #include <omp.h> #define CHUNKSIZE 1 int ptiOmpMTTKRPHiCOOKernels( ptiSparseTensorHiCOO const * const hitsr, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_3D( ptiSparseTensorHiCOO const * const hitsr, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOBlocks( ptiSparseTensorHiCOO const * const hitsr, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tb); int ptiOmpMTTKRPHiCOOBlocks_3D( ptiSparseTensorHiCOO const * const hitsr, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tb); int ptiOmpMTTKRPHiCOOKernelsBlocks( ptiSparseTensorHiCOO const * const hitsr, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. 
ptiIndex const mode, const int tk, const int tb); int ptiOmpMTTKRPHiCOOKernelsBlocks_3D( ptiSparseTensorHiCOO const * const hitsr, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk, const int tb); int ptiOmpMTTKRPHiCOOKernels_MatrixTiling( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. 
ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. 
ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk); int ptiOmpMTTKRPHiCOOBlocks_MatrixTiling( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tb); int ptiOmpMTTKRPHiCOOBlocks_3D_MatrixTiling( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tb); int ptiOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk, const int tb); int ptiOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk, const int tb); /** * Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode * @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R * @param[in] hitsr the HiCOO sparse tensor input * @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary * @param[in] mats_order the order of the Khatri-Rao products * @param[in] mode the mode on which the MTTKRP is performed * @param[in] scratch an temporary array to store intermediate results, space assigned before this function * * This function uses support arbitrary-order sparse tensors with Khatri-Rao * products of dense factor matrices, the output is the updated dense matrix for the "mode". 
*/ int ptiOmpMTTKRPHiCOO( ptiSparseTensorHiCOO const * const hitsr, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk, const int tb) { if(tk > 1 && tb == 1) { ptiAssert(ptiOmpMTTKRPHiCOOKernels(hitsr, mats, mats_order, mode, tk) == 0); } else if(tk == 1 && tb > 1) { ptiAssert(ptiOmpMTTKRPHiCOOBlocks(hitsr, mats, mats_order, mode, tb) == 0); } else if(tk > 1 && tb > 1) { ptiAssert(ptiOmpMTTKRPHiCOOKernelsBlocks(hitsr, mats, mats_order, mode, tk, tb) == 0); } else if(tk == 1 && tb == 1) { printf("Should specify sequetial MTTKRP.\n"); return -1; } return 0; } int ptiOmpMTTKRPHiCOO_MatrixTiling( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk, const int tb) { if(tk > 1 && tb == 1) { ptiAssert(ptiOmpMTTKRPHiCOOKernels_MatrixTiling(hitsr, mats, mats_order, mode, tk) == 0); } else if(tk == 1 && tb > 1) { ptiAssert(ptiOmpMTTKRPHiCOOBlocks_MatrixTiling(hitsr, mats, mats_order, mode, tb) == 0); } else if(tk > 1 && tb > 1) { ptiAssert(ptiOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling(hitsr, mats, mats_order, mode, tk, tb) == 0); } else if(tk == 1 && tb == 1) { printf("Should specify sequetial MTTKRP with -d -2.\n"); return -1; } return 0; } int ptiOmpMTTKRPHiCOO_MatrixTiling_Scheduled( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. 
ptiIndex const mode, const int tk, const int tb, int balanced) { if(tk > 1 && tb == 1) { if (balanced == 0) ptiAssert(ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0); else if (balanced == 1) ptiAssert(ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0); } else { printf("Haven't support block parallelism.\n"); return -1; } return 0; } int ptiOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk, const int tb, int balanced) { if(tk > 1 && tb == 1) { if(balanced == 0) ptiAssert(ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); else if (balanced == 1) ptiAssert(ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); } else { printf("Haven't support block parallelism.\n"); return -1; } return 0; } int ptiOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce_Two( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk, const int tb) { if(tk > 1 && tb == 1) { ptiAssert(ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); } else { printf("Haven't support block parallelism.\n"); return -1; } return 0; } int ptiOmpMTTKRPHiCOOKernels( ptiSparseTensorHiCOO const * const hitsr, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. 
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;

    /* Specialized, lighter-weight path for third-order tensors. */
    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOKernels_3D(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const vals = hitsr->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiMatrix * const M = mats[nmodes];         // output matrix, zeroed below
    ptiValue * const mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    // omp_lock_t lock;
    // omp_init_lock(&lock);

    /* Loop kernels */
#pragma omp parallel for num_threads(tk)
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        /* Allocate thread-private data */
        ptiIndex * block_coord = (ptiIndex*)malloc(nmodes * sizeof(*block_coord));
        ptiIndex * ele_coord = (ptiIndex*)malloc(nmodes * sizeof(*ele_coord));
        ptiValueVector scratch; // Temporary array
        ptiNewValueVector(&scratch, R, R);

        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Block indices */
            for(ptiIndex m=0; m<nmodes; ++m)
                block_coord[m] = hitsr->binds[m].data[b];

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Element indices: block base (binds << sb_bits) + in-block offset (einds) */
                for(ptiIndex m=0; m<nmodes; ++m)
                    ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z];

                /* Multiply the 1st matrix */
                ptiIndex times_mat_index = mats_order[1];
                ptiMatrix * times_mat = mats[times_mat_index];
                ptiIndex tmp_i = ele_coord[times_mat_index];
                ptiValue const entry = vals[z];
                for(ptiIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(ptiIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    times_mat = mats[times_mat_index];
                    tmp_i = ele_coord[times_mat_index];
                    for(ptiIndex r=0; r<R; ++r) {
                        scratch.data[r] *= times_mat->values[tmp_i * stride + r];
                    }
                }

                /* Accumulate into the output row; atomics because different
                 * kernels may update the same output row concurrently. */
                ptiIndex const mode_i = ele_coord[mode];
                // omp_set_lock(&lock);
                for(ptiIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    mvals[mode_i * stride + r] += scratch.data[r];
                }
                // omp_unset_lock(&lock);
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(block_coord);
        free(ele_coord);
        ptiFreeValueVector(&scratch);
    }   // End loop kernels
    // omp_destroy_lock(&lock);

    return 0;
}


/**
 * OpenMP MTTKRP on a HiCOO sparse tensor specialized for 3-D tensors:
 * one fused multiply per nonzero, atomic accumulation into mats[nmodes].
 */
int ptiOmpMTTKRPHiCOOKernels_3D(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiMatrix * mats[],             // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats.
 */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* The two factor matrices multiplied per nonzero; fixed for 3-D. */
    ptiIndex times_mat_index_1 = mats_order[1];
    ptiMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
#pragma omp parallel for num_threads(tk)
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel.
         * NOTE(review): b and z are ptiIndex but iterate over ptiNnzIndex
         * ranges (unlike the general kernel above, which uses ptiNnzIndex)
         * -- may truncate for very large nnz counts; confirm. */
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            ptiBlockIndex block_coord_mode = hitsr->binds[mode].data[b];
            ptiBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b];
            ptiBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b];

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Global row indices: block base + in-block offset. */
                ptiIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z];
                ptiIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z];
                ptiIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z];
                ptiValue entry = vals[z];

                for(ptiIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/**
 * OpenMP MTTKRP on HiCOO with matrix tiling (ptiRankMatrix factors,
 * ptiElementIndex in-block offsets); general order, atomic accumulation.
 */
int
ptiOmpMTTKRPHiCOOKernels_MatrixTiling(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],         // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;

    /* Specialized path for third-order tensors. */
    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* Loop kernels */
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        /* Allocate thread-private data */
        ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        ptiValueVector scratch; // Temporary array
        ptiNewValueVector(&scratch, R, R);

        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Blocked matrices: base pointer of each factor's tile for this block. */
            for(ptiIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                ptiIndex times_mat_index = mats_order[1];
                ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                ptiValue const entry = vals[z];
#pragma omp simd
                for(ptiElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(ptiIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                    for(ptiElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                ptiElementIndex const mode_i = hitsr->einds[mode].data[z];
                ptiValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(ptiElementIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        ptiFreeValueVector(&scratch);
    }   // End loop kernels

    return 0;
}


/**
 * 3-D specialization of the matrix-tiled HiCOO MTTKRP kernel;
 * atomic accumulation into mats[nmodes].
 */
int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],         // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats.
 */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    ptiIndex times_mat_index_1 = mats_order[1];
    ptiRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Base pointers of the tiles touched by this block. */
            ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                ptiElementIndex mode_i = hitsr->einds[mode].data[z];
                ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                ptiValue entry = vals[z];
                ptiValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;

                for(ptiElementIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    bmvals_row[r] += entry *
                        blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/**
 * Scheduled matrix-tiled HiCOO MTTKRP (general order).  Kernels are
 * selected per parallel iteration from hitsr->kschr[mode]; the output
 * is updated without atomics (presumably the schedule keeps concurrent
 * kernels on disjoint output rows -- verify against the scheduler).
 */
int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],         // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;

    /* Specialized path for third-order tensors. */
    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* sk = kernel edge length; num_kernel_dim = kernel strips along `mode`. */
    ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits);
    ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    ptiIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    ptiNnzIndex * thread_nnzs = (ptiNnzIndex*)malloc(tk * sizeof(ptiNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(ptiNnzIndex));
#endif

    /* Loop parallel iterations */
    for(ptiIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(ptiIndex k=0;
            k<num_kernel_dim; ++k) {
            int tid = omp_get_thread_num();
            /* Kernel strip k has no kernel scheduled in iteration i. */
            if(i >= kschr_mode[k].len) continue;

            ptiIndex kptr_loc = kschr_mode[k].data[i];
            ptiNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            ptiNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Allocate thread-private data */
            ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            ptiValueVector scratch; // Temporary array
            ptiNewValueVector(&scratch, R, R);

            /* Loop blocks in a kernel */
            for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                /* Blocked matrices */
                for(ptiIndex m=0; m<nmodes; ++m)
                    blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

                ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
                ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                /* Loop entries in a block */
                for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                    /* Multiply the 1st matrix */
                    ptiIndex times_mat_index = mats_order[1];
                    ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                    ptiValue const entry = vals[z];
#pragma omp simd
                    for(ptiElementIndex r=0; r<R; ++r) {
                        scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                    }
                    /* Multiply the rest matrices */
                    for(ptiIndex m=2; m<nmodes; ++m) {
                        times_mat_index = mats_order[m];
                        tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                        for(ptiElementIndex r=0; r<R; ++r) {
                            scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                        }
                    }

                    /* No atomic here (cf. the atomic variants); presumably
                     * the kschr schedule avoids write conflicts -- verify. */
                    ptiElementIndex const mode_i = hitsr->einds[mode].data[z];
#pragma omp simd
                    for(ptiElementIndex r=0; r<R; ++r) {
                        blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                    }
                }   // End loop entries
            }   // End loop blocks

            /* Free thread-private space */
            free(blocked_times_mat);
            ptiFreeValueVector(&scratch);
        }   // End loop kernels
    }   // End loop iterations

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    ptiNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"HIPARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", max_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    ptiAssert(sum_nnzs == hitsr->nnz);

    free(thread_nnzs);
#endif

    return 0;
}


/**
 * Balanced variant of the scheduled matrix-tiled HiCOO MTTKRP.  Works
 * through the kschr_balanced partitions first (no atomics), then
 * processes the leftover kernels in kschr_rest with atomic updates.
 */
int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],         // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;

    /* Specialized path for third-order tensors. */
    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats.
 */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits);
    ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    ptiIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    ptiIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    ptiIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    ptiNnzIndex * thread_nnzs = (ptiNnzIndex*)malloc(tk * sizeof(ptiNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(ptiNnzIndex));
#endif

    /* Loop partitions */
    for(ptiIndex p=0; p<npars; ++p) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(ptiIndex i=0; i<num_kernel_dim; ++i) {
            /* Strip i has no kernels left in partition p. */
            if(p >= kschr_balanced_pos_mode[i].len - 1) continue;
            int tid = omp_get_thread_num();
            ptiIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            ptiIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            /* Loop inside a partition */
            for(ptiIndex j = j_begin; j < j_end; ++j) {
                ptiIndex kernel_num = kschr_balanced_mode[i].data[j];
                ptiNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                ptiNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Allocate thread-private data */
                ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat));
                ptiValueVector scratch; // Temporary array
                ptiNewValueVector(&scratch, R, R);

                /* Loop blocks in a kernel */
                for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                    /* Blocked matrices */
                    for(ptiIndex m=0; m<nmodes; ++m)
                        blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                    ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

                    ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
                    ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                    /* Loop entries in a block */
                    for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                        /* Multiply the 1st matrix */
                        ptiIndex times_mat_index = mats_order[1];
                        ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                        ptiValue const entry = vals[z];
#pragma omp simd
                        for(ptiElementIndex r=0; r<R; ++r) {
                            scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                        }
                        /* Multiply the rest matrices */
                        for(ptiIndex m=2; m<nmodes; ++m) {
                            times_mat_index = mats_order[m];
                            tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                            for(ptiElementIndex r=0; r<R; ++r) {
                                scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                            }
                        }

                        /* Non-atomic accumulation within the balanced schedule. */
                        ptiElementIndex const mode_i = hitsr->einds[mode].data[z];
#pragma omp simd
                        for(ptiElementIndex r=0; r<R; ++r) {
                            blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                        }
                    }   // End loop entries
                }   // End loop blocks

                /* Free thread-private space */
                free(blocked_times_mat);
                ptiFreeValueVector(&scratch);
            }   // End loop inside a partition
        }   // End loop kernels
    }   // End loop partitions

    /* Process using atomics */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(ptiIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) {
        int tid = omp_get_thread_num();
        ptiIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        ptiNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        ptiNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Allocate thread-private data */
        ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        ptiValueVector scratch; // Temporary array
        ptiNewValueVector(&scratch, R, R);

        /* Loop blocks in a kernel */
        for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Blocked matrices */
            for(ptiIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                ptiIndex times_mat_index = mats_order[1];
                ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                ptiValue const entry = vals[z];
#pragma omp simd
                for(ptiElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(ptiIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                    for(ptiElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                ptiElementIndex const mode_i = hitsr->einds[mode].data[z];
                ptiValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(ptiElementIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        ptiFreeValueVector(&scratch);
    }   // End loop kernels

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    ptiNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"HIPARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", max_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    ptiAssert(sum_nnzs == hitsr->nnz);

    free(thread_nnzs);
#endif

    return 0;
}


/**
 * 3-D specialization of the scheduled matrix-tiled HiCOO MTTKRP;
 * non-atomic accumulation driven by hitsr->kschr[mode].
 */
int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],         // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    ptiIndex times_mat_index_1 = mats_order[1];
    ptiRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits);
    ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    ptiIndexVector * restrict kschr_mode = hitsr->kschr[mode];
    // printf("nkiters: %u, num_kernel_dim: %u\n", hitsr->nkiters[mode], num_kernel_dim);

#ifdef NNZ_STATISTICS
    ptiNnzIndex * thread_nnzs = (ptiNnzIndex*)malloc(tk * sizeof(ptiNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(ptiNnzIndex));
#endif

    /* Loop parallel iterations */
    for(ptiIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(ptiIndex k=0; k<num_kernel_dim; ++k) {
            int tid = omp_get_thread_num();
            // printf("tid: %d, (i, k): (%u, %u)\n", tid, i, k);
            /* Kernel strip k has no kernel scheduled in iteration i. */
            if(i >= kschr_mode[k].len) {
                // printf("i: %u, k: %u\n", i, k);
                continue;
            }

            ptiIndex kptr_loc = kschr_mode[k].data[i];
            ptiNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            ptiNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Loop blocks in a kernel */
            for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
                ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) *
                    stride;
                ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

                ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
                ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                /* Loop entries in a block */
                for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                    ptiElementIndex mode_i = hitsr->einds[mode].data[z];
                    ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                    ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                    ptiValue entry = vals[z];

                    /* Non-atomic fused update within the schedule. */
#pragma omp simd
                    for(ptiElementIndex r=0; r<R; ++r) {
                        blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += entry *
                            blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] *
                            blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r];
                    }
                }   // End loop entries
            }   // End loop blocks
        }   // End loop kernels
    }   // End loop iterations

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    ptiNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"HIPARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", max_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    ptiAssert(sum_nnzs == hitsr->nnz);

    free(thread_nnzs);
#endif

    return 0;
}


/**
 * 3-D specialization of the balanced scheduled matrix-tiled HiCOO
 * MTTKRP; leftover kernels in kschr_rest are processed with atomics.
 */
int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],         // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    ptiIndex times_mat_index_1 = mats_order[1];
    ptiRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits);
    ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    ptiIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    ptiIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    ptiIndex npars = hitsr->nkpars[mode];
    // printf("nkiters: %u, num_kernel_dim: %u\n", hitsr->nkiters[mode], num_kernel_dim);

#ifdef NNZ_STATISTICS
    ptiNnzIndex * thread_nnzs = (ptiNnzIndex*)malloc(tk * sizeof(ptiNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(ptiNnzIndex));
#endif

    /* Loop partitions */
    for(ptiIndex p=0; p<npars; ++p) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(ptiIndex i=0; i<num_kernel_dim; ++i) {
            /* Strip i has no kernels left in partition p. */
            if(p >= kschr_balanced_pos_mode[i].len - 1) continue;
            int tid = omp_get_thread_num();
            ptiIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            ptiIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            /* Loop inside a partition */
            for(ptiIndex j = j_begin; j < j_end; ++j) {
                ptiIndex kernel_num = kschr_balanced_mode[i].data[j];
                // printf("tid: %d, (i, j): (%u, %u), kernel_num: %u\n", tid, i, j, kernel_num);
                ptiNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                ptiNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Loop blocks in a kernel */
                for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
                    ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                    ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                    ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

                    ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
                    ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                    /* Loop entries in a block */
                    for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                        ptiElementIndex mode_i = hitsr->einds[mode].data[z];
                        ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                        ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                        ptiValue entry = vals[z];

#pragma omp simd
                        for(ptiElementIndex r=0; r<R; ++r) {
                            blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += entry *
                                blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] *
                                blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r];
                        }
                    }   // End loop entries
                }   // End loop blocks
            }   // End loop inside a partition
        }   // End loop kernels
    }   // End loop partitions

    /* Process using atomics */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(ptiIndex k = 0; k
                         < hitsr->kschr_rest[mode].len; ++k) {
        int tid = omp_get_thread_num();
        ptiIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        ptiNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        ptiNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Loop blocks in a kernel */
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                ptiElementIndex mode_i = hitsr->einds[mode].data[z];
                ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                ptiValue entry = vals[z];
                ptiValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;

                for(ptiElementIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    bmvals_row[r] += entry *
                        blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    ptiNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"HIPARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", max_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    ptiAssert(sum_nnzs == hitsr->nnz);

    free(thread_nnzs);
#endif

    return 0;
}


/**
 * Scheduled matrix-tiled HiCOO MTTKRP accumulating into per-thread
 * output copies (copy_mats[tid]) instead of atomics; the caller is
 * expected to reduce the copies afterwards.  General order; dispatches
 * to the 3-D variant when nmodes == 3.
 */
int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],         // mats[nmodes] as temporary space.
    ptiRankMatrix * copy_mats[],    // temporary matrices for reduction
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = hitsr->nmodes;

    /* Specialized path for third-order tensors. */
    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* Zero every thread-private output copy as well. */
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits);
    ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    ptiIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    ptiNnzIndex * thread_nnzs = (ptiNnzIndex*)malloc(tk * sizeof(ptiNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(ptiNnzIndex));
#endif

    /* Loop parallel iterations */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(ptiIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        int tid = omp_get_thread_num();
        /* Loop kernels */
        for(ptiIndex k=0; k<num_kernel_dim; ++k) {
            if(i >= kschr_mode[k].len) continue;

            ptiIndex kptr_loc = kschr_mode[k].data[i];
            ptiNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            ptiNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Allocate thread-private data */
            ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            ptiValueVector scratch; // Temporary array
            ptiNewValueVector(&scratch, R, R);

            /* Loop blocks in a kernel */
            for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                /* Blocked matrices */
                for(ptiIndex m=0; m<nmodes; ++m)
                    blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                /* Accumulate into this thread's private output copy. */
                ptiValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

                ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
                ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                /* Loop entries in a block */
                for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                    /* Multiply the 1st matrix */
                    ptiIndex times_mat_index = mats_order[1];
                    ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                    ptiValue const entry = vals[z];
#pragma omp simd
                    for(ptiElementIndex r=0; r<R; ++r) {
                        scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                    }
                    /* Multiply the rest matrices */
                    for(ptiIndex m=2; m<nmodes; ++m) {
                        times_mat_index = mats_order[m];
                        tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                        for(ptiElementIndex r=0; r<R; ++r) {
                            scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                        }
                    }

                    ptiElementIndex const mode_i = hitsr->einds[mode].data[z];
#pragma omp
simd for(ptiElementIndex r=0; r<R; ++r) { blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); ptiFreeValueVector(&scratch); } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(ptiIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ ptiNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"HIPARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", max_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); ptiAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. 
ptiIndex const mode, const int tk) { ptiIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { ptiAssert(ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } ptiIndex const * const ndims = hitsr->ndims; ptiValue const * const restrict vals = hitsr->values.data; ptiElementIndex const stride = mats[0]->stride; /* Check the mats. */ for(ptiIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } ptiIndex const tmpI = mats[mode]->nrows; ptiElementIndex const R = mats[mode]->ncols; ptiRankMatrix * const restrict M = mats[nmodes]; ptiValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits); ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; ptiIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; ptiIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; ptiIndex npars = hitsr->nkpars[mode]; #ifdef NNZ_STATISTICS ptiNnzIndex * thread_nnzs = (ptiNnzIndex*)malloc(tk * sizeof(ptiNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(ptiNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(ptiIndex p=0; p<npars; ++p) { int tid = omp_get_thread_num(); /* Loop kernels */ for(ptiIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; ptiIndex j_begin = kschr_balanced_pos_mode[i].data[p]; ptiIndex 
j_end = kschr_balanced_pos_mode[i].data[p+1]; for(ptiIndex j=j_begin; j<j_end; ++j) { ptiIndex kernel_num = kschr_balanced_mode[i].data[j]; ptiNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; ptiNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat)); ptiValueVector scratch; // Temporary array ptiNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(ptiIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; ptiNnzIndex bptr_begin = hitsr->bptr.data[b]; ptiNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(ptiIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ ptiIndex times_mat_index = mats_order[1]; ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; ptiValue const entry = vals[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(ptiIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r]; } } ptiElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); ptiFreeValueVector(&scratch); } // End kernels 
in a partition } // End loop kernels } // End loop iterations /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(ptiIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); ptiIndex kernel_num = hitsr->kschr_rest[mode].data[k]; ptiNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; ptiNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat)); ptiValueVector scratch; // Temporary array ptiNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(ptiIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; /* Use copy_mats to reduce atomics */ ptiValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; ptiNnzIndex bptr_begin = hitsr->bptr.data[b]; ptiNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(ptiIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ ptiIndex times_mat_index = mats_order[1]; ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; ptiValue const entry = vals[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(ptiIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r]; } } 
ptiElementIndex const mode_i = hitsr->einds[mode].data[z]; ptiValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(ptiElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); ptiFreeValueVector(&scratch); } // End loop kernels /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(ptiIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ ptiNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"HIPARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", max_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); ptiAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk) { ptiIndex const nmodes = hitsr->nmodes; ptiIndex const * const ndims = hitsr->ndims; ptiValue const * const restrict vals = hitsr->values.data; ptiElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ ptiAssert(nmodes ==3); for(ptiIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } ptiIndex const tmpI = mats[mode]->nrows; ptiElementIndex const R = mats[mode]->ncols; ptiRankMatrix * const restrict M = mats[nmodes]; ptiValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } ptiIndex times_mat_index_1 = mats_order[1]; ptiRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; ptiIndex times_mat_index_2 = mats_order[2]; ptiRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits); ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; ptiIndexVector * restrict kschr_mode = hitsr->kschr[mode]; #ifdef NNZ_STATISTICS ptiNnzIndex * thread_nnzs = (ptiNnzIndex*)malloc(tk * sizeof(ptiNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(ptiNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(ptiIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ for(ptiIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) { // printf("i: %u, k: %u\n", i, k); continue; } ptiIndex kptr_loc = kschr_mode[k].data[i]; ptiNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; ptiNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Loop blocks in a kernel */ for(ptiIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ ptiValue * blocked_mvals = copy_mats[tid]->values + 
(hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; ptiNnzIndex bptr_begin = hitsr->bptr.data[b]; ptiNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(ptiIndex z=bptr_begin; z<bptr_end; ++z) { ptiElementIndex mode_i = hitsr->einds[mode].data[z]; ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; ptiValue entry = vals[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(ptiIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ ptiNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"HIPARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", max_nnzs: 
%"HIPARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); ptiAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk) { ptiIndex const nmodes = hitsr->nmodes; ptiIndex const * const ndims = hitsr->ndims; ptiValue const * const restrict vals = hitsr->values.data; ptiElementIndex const stride = mats[0]->stride; /* Check the mats. */ ptiAssert(nmodes ==3); for(ptiIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } ptiIndex const tmpI = mats[mode]->nrows; ptiElementIndex const R = mats[mode]->ncols; ptiRankMatrix * const restrict M = mats[nmodes]; ptiValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } ptiIndex times_mat_index_1 = mats_order[1]; ptiRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; ptiIndex times_mat_index_2 = mats_order[2]; ptiRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits); ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; ptiIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; ptiIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; ptiIndex npars = hitsr->nkpars[mode]; #ifdef NNZ_STATISTICS ptiNnzIndex * thread_nnzs = 
(ptiNnzIndex*)malloc(tk * sizeof(ptiNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(ptiNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(ptiIndex p=0; p<npars; ++p) { int tid = omp_get_thread_num(); /* Loop kernels */ for(ptiIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; ptiIndex j_begin = kschr_balanced_pos_mode[i].data[p]; ptiIndex j_end = kschr_balanced_pos_mode[i].data[p+1]; for(ptiIndex j=j_begin; j<j_end; ++j) { ptiIndex kernel_num = kschr_balanced_mode[i].data[j]; ptiNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; ptiNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(ptiIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ ptiValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; ptiNnzIndex bptr_begin = hitsr->bptr.data[b]; ptiNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(ptiIndex z=bptr_begin; z<bptr_end; ++z) { ptiElementIndex mode_i = hitsr->einds[mode].data[z]; ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; ptiValue entry = vals[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] * 
blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End kernels in a partition } // End loop kernels } // End loop partitions /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(ptiIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); ptiIndex kernel_num = hitsr->kschr_rest[mode].data[k]; ptiNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; ptiNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(ptiIndex b=kptr_begin; b<kptr_end; ++b) { /* Use copy_mats to reduce atomics */ ptiValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; ptiNnzIndex bptr_begin = hitsr->bptr.data[b]; ptiNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(ptiIndex z=bptr_begin; z<bptr_end; ++z) { ptiElementIndex mode_i = hitsr->einds[mode].data[z]; ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; ptiValue entry = vals[z]; ptiValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(ptiElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += entry * blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels /* Reduction */ #pragma omp parallel for 
schedule(static) num_threads(tk) for(ptiIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } /* Calculate load balance of kernels */ #ifdef NNZ_STATISTICS ptiNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"HIPARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", max_nnzs: %"HIPARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); ptiAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int ptiOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk) { ptiIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { ptiAssert(ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } ptiIndex const * const ndims = hitsr->ndims; ptiValue const * const restrict vals = hitsr->values.data; ptiElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(ptiIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } ptiIndex const tmpI = mats[mode]->nrows; ptiElementIndex const R = mats[mode]->ncols; ptiRankMatrix * const restrict M = mats[nmodes]; ptiValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits); ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; ptiIndexVector * restrict kschr_mode = hitsr->kschr[mode]; int tk2 = 2; /* Loop parallel iterations */ #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk/tk2) for(ptiIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ // TODO: cannot compile using icc // #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk2) for(ptiIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) continue; ptiIndex kptr_loc = kschr_mode[k].data[i]; ptiNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; ptiNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Allocate thread-private data */ ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat)); ptiValueVector scratch; // Temporary array ptiNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(ptiIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; ptiNnzIndex bptr_begin = hitsr->bptr.data[b]; ptiNnzIndex bptr_end = 
hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(ptiIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ ptiIndex times_mat_index = mats_order[1]; ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; ptiValue const entry = vals[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(ptiIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r]; } } ptiElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); ptiFreeValueVector(&scratch); } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(ptiIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk/tk2; ++t) { #pragma omp simd for(ptiElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } return 0; } int ptiOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two( ptiSparseTensorHiCOO const * const hitsr, ptiRankMatrix * mats[], // mats[nmodes] as temporary space. ptiRankMatrix * copy_mats[], // temporary matrices for reduction ptiIndex const mats_order[], // Correspond to the mode order of X. ptiIndex const mode, const int tk) { ptiIndex const nmodes = hitsr->nmodes; ptiIndex const * const ndims = hitsr->ndims; ptiValue const * const restrict vals = hitsr->values.data; ptiElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ ptiAssert(nmodes ==3); for(ptiIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } ptiIndex const tmpI = mats[mode]->nrows; ptiElementIndex const R = mats[mode]->ncols; ptiRankMatrix * const restrict M = mats[nmodes]; ptiValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } ptiIndex times_mat_index_1 = mats_order[1]; ptiRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; ptiIndex times_mat_index_2 = mats_order[2]; ptiRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits); ptiIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; ptiIndexVector * restrict kschr_mode = hitsr->kschr[mode]; int tk2 = 2; /* Loop parallel iterations */ #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk/tk2) for(ptiIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ // Cannot compile using icc // #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk2) for(ptiIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) { // printf("i: %u, k: %u\n", i, k); continue; } ptiIndex kptr_loc = kschr_mode[k].data[i]; ptiNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; ptiNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Loop blocks in a kernel */ for(ptiIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ ptiValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) 
* stride;
            ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            /* NOTE(review): z is ptiIndex while bptr holds ptiNnzIndex values — possible
             * truncation for tensors with more nonzeros than ptiIndex can hold; confirm widths. */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                ptiElementIndex mode_i = hitsr->einds[mode].data[z];
                ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                ptiValue entry = vals[z];
                /* Rank-wise fused multiply-add into the thread's output copy. */
                #pragma omp simd
                for(ptiElementIndex r=0; r<R; ++r) {
                    blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += entry *
                        blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels
    }   // End loop iterations

    /* Reduction: fold the tk/tk2 per-thread partial output matrices back into mvals. */
    #pragma omp parallel for schedule(static) num_threads(tk)
    for(ptiIndex i=0; i<ndims[mode]; ++i) {
        for(int t=0; t<tk/tk2; ++t) {
            #pragma omp simd
            for(ptiElementIndex r=0; r<R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

    return 0;
}


/* MTTKRP on a HiCOO tensor, general-order variant: blocks inside each kernel are
 * processed by an OpenMP team of tb threads; conflicting writes to the output
 * matrix are serialized with "omp atomic". 3-order tensors are dispatched to the
 * specialized ptiOmpMTTKRPHiCOOBlocks_3D.
 * mats[nmodes] is used as the output/temporary matrix; mats_order gives the
 * multiplication order of the factor matrices. Returns 0 on success. */
int ptiOmpMTTKRPHiCOOBlocks(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tb)
{
    ptiIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOBlocks_3D(hitsr, mats, mats_order, mode, tb) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const vals = hitsr->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiMatrix * const M = mats[nmodes];
    ptiValue * const mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));   // output accumulates from zero

    // omp_lock_t lock;
    // omp_init_lock(&lock);

    /* Loop kernels (sequential; only blocks within a kernel run in parallel) */
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        /* NOTE(review): b (and z below) are ptiIndex but iterate over ptiNnzIndex
         * ranges — potential truncation on very large tensors; confirm type widths. */
        #pragma omp parallel for num_threads(tb)
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Allocate thread-private data */
            ptiIndex * block_coord = (ptiIndex*)malloc(nmodes * sizeof(*block_coord));
            ptiIndex * ele_coord = (ptiIndex*)malloc(nmodes * sizeof(*ele_coord));
            ptiValueVector scratch; // Temporary array
            ptiNewValueVector(&scratch, R, R);

            /* Block indices */
            for(ptiIndex m=0; m<nmodes; ++m)
                block_coord[m] = hitsr->binds[m].data[b];

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Element indices: global index = (block index << sb_bits) + in-block offset */
                for(ptiIndex m=0; m<nmodes; ++m)
                    ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z];

                /* Multiply the 1st matrix */
                ptiIndex times_mat_index = mats_order[1];
                ptiMatrix * times_mat = mats[times_mat_index];
                ptiIndex tmp_i = ele_coord[times_mat_index];
                ptiValue const entry = vals[z];
                for(ptiIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(ptiIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    times_mat = mats[times_mat_index];
                    tmp_i = ele_coord[times_mat_index];
                    for(ptiIndex r=0; r<R; ++r) {
                        scratch.data[r] *= times_mat->values[tmp_i * stride + r];
                    }
                }

                ptiIndex const mode_i = ele_coord[mode];
                // omp_set_lock(&lock);
                for(ptiIndex r=0; r<R; ++r) {
                    /* Different blocks may touch the same output row: atomic accumulate. */
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += scratch.data[r];
                }
                // omp_unset_lock(&lock);
            }   // End loop entries

            /* Free thread-private space */
            free(block_coord);
            free(ele_coord);
            ptiFreeValueVector(&scratch);
        }   // End loop blocks
    }   // End loop kernels
    // omp_destroy_lock(&lock);

    return 0;
}


/* 3-order specialization of ptiOmpMTTKRPHiCOOBlocks: the two factor matrices are
 * resolved once up front and per-entry indices are computed directly (no per-mode
 * coordinate arrays, no scratch vector). */
int ptiOmpMTTKRPHiCOOBlocks_3D(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tb)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* The two non-target factor matrices, fixed for the whole computation. */
    ptiIndex times_mat_index_1 = mats_order[1];
    ptiMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            ptiBlockIndex block_coord_mode =
hitsr->binds[mode].data[b];
            ptiBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b];
            ptiBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b];

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Global row indices: (block index << sb_bits) + in-block element offset. */
                ptiIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z];
                ptiIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z];
                ptiIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z];
                ptiValue entry = vals[z];

                for(ptiIndex r=0; r<R; ++r) {
                    /* Blocks run in parallel and may share output rows: atomic accumulate. */
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += entry *
                        times_mat_1->values[tmp_i_1 * stride + r] *
                        times_mat_2->values[tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/* MTTKRP on a HiCOO tensor, general-order variant over ptiRankMatrix factors
 * (matrix-tiling layout, ptiElementIndex in-block offsets). Blocks within each
 * kernel are processed by tb OpenMP threads; output rows are updated atomically.
 * 3-order tensors are dispatched to ptiOmpMTTKRPHiCOOBlocks_3D_MatrixTiling. */
int ptiOmpMTTKRPHiCOOBlocks_MatrixTiling(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tb)
{
    ptiIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOBlocks_3D_MatrixTiling(hitsr, mats, mats_order, mode, tb) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* Loop kernels */
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Allocate thread-private data */
            ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            ptiValueVector scratch; // Temporary array
            ptiNewValueVector(&scratch, R, R);

            /* Blocked matrices: base pointers of each factor's rows for this block. */
            for(ptiIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            /* NOTE(review): z is ptiIndex over a ptiNnzIndex range — confirm widths. */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                ptiIndex times_mat_index = mats_order[1];
                ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                ptiValue const entry = vals[z];
                for(ptiElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(ptiIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    for(ptiElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                ptiElementIndex const mode_i = hitsr->einds[mode].data[z];
                for(ptiElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                }
            }   // End loop entries

            /* Free thread-private space */
            free(blocked_times_mat);
            ptiFreeValueVector(&scratch);
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/* 3-order matrix-tiling specialization: factor matrices resolved once, per-block
 * base pointers computed up front, entries accumulated with "omp atomic". */
int ptiOmpMTTKRPHiCOOBlocks_3D_MatrixTiling(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tb)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    ptiIndex times_mat_index_1 = mats_order[1];
    ptiRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Allocate thread-private data
*/
            /* Per-block base pointers into output and factor matrices. */
            ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                ptiElementIndex mode_i = hitsr->einds[mode].data[z];
                ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                ptiValue entry = vals[z];

                for(ptiElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += entry *
                        blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/* MTTKRP on a HiCOO tensor with two-level (nested) OpenMP parallelism:
 * tk threads over kernels, tb threads over the blocks inside each kernel.
 * Requires nested parallelism, enabled below via omp_set_nested(1).
 * 3-order tensors are dispatched to ptiOmpMTTKRPHiCOOKernelsBlocks_3D. */
int ptiOmpMTTKRPHiCOOKernelsBlocks(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk,
    const int tb)
{
    /* NOTE(review): omp_set_nested is deprecated in OpenMP 5.0 in favor of
     * omp_set_max_active_levels — confirm the targeted OpenMP version. */
    omp_set_nested(1);
    omp_set_dynamic(0);

    ptiIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOKernelsBlocks_3D(hitsr, mats, mats_order, mode, tk, tb) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const vals = hitsr->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiMatrix * const M = mats[nmodes];
    ptiValue * const mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    // omp_lock_t lock;
    // omp_init_lock(&lock);

    /* Loop kernels (outer parallel level, tk threads) */
    #pragma omp parallel for num_threads(tk)
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel (inner parallel level, tb threads) */
        #pragma omp parallel for num_threads(tb)
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Allocate thread-private data */
            ptiIndex * block_coord = (ptiIndex*)malloc(nmodes * sizeof(*block_coord));
            ptiIndex * ele_coord = (ptiIndex*)malloc(nmodes * sizeof(*ele_coord));
            ptiValueVector scratch; // Temporary array
            ptiNewValueVector(&scratch, R, R);

            /* Block indices */
            for(ptiIndex m=0; m<nmodes; ++m)
                block_coord[m] = hitsr->binds[m].data[b];

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Element indices */
                for(ptiIndex m=0; m<nmodes; ++m)
                    ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z];

                /* Multiply the 1st matrix */
                ptiIndex times_mat_index = mats_order[1];
                ptiMatrix * times_mat = mats[times_mat_index];
                ptiIndex tmp_i = ele_coord[times_mat_index];
                ptiValue const entry = vals[z];
                for(ptiIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(ptiIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    times_mat = mats[times_mat_index];
                    tmp_i = ele_coord[times_mat_index];
                    for(ptiIndex r=0; r<R; ++r) {
                        scratch.data[r] *= times_mat->values[tmp_i * stride + r];
                    }
                }

                ptiIndex const mode_i = ele_coord[mode];
                // omp_set_lock(&lock);
                for(ptiIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += scratch.data[r];
                }
                // omp_unset_lock(&lock);
            }   // End loop entries

            /* Free thread-private space */
            free(block_coord);
            free(ele_coord);
            ptiFreeValueVector(&scratch);
        }   // End loop blocks
    }   // End loop kernels
    // omp_destroy_lock(&lock);

    return 0;
}


/* 3-order specialization of ptiOmpMTTKRPHiCOOKernelsBlocks (nested tk x tb
 * parallelism, atomic accumulation into the output matrix). */
int ptiOmpMTTKRPHiCOOKernelsBlocks_3D(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk,
    const int tb)
{
    omp_set_nested(1);
    omp_set_dynamic(0);

    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    ptiIndex times_mat_index_1 = mats_order[1];
    ptiMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
    #pragma omp parallel for num_threads(tk)
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            ptiBlockIndex block_coord_mode = hitsr->binds[mode].data[b];
            ptiBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b];
            ptiBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b];

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                ptiIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z];
                ptiIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z];
                ptiIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z];
                ptiValue entry = vals[z];

                for(ptiIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += entry *
                        times_mat_1->values[tmp_i_1 * stride + r] *
                        times_mat_2->values[tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/* Matrix-tiling variant with nested tk x tb parallelism over ptiRankMatrix
 * factors; 3-order tensors dispatch to the _3D_MatrixTiling specialization. */
int ptiOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk,
    const int tb)
{
    ptiIndex const nmodes = hitsr->nmodes;
    omp_set_nested(1);
    omp_set_dynamic(0);

    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling(hitsr, mats, mats_order, mode, tk, tb) == 0);
        return 0;
    }

    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* Loop kernels */
    #pragma omp parallel for num_threads(tk)
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(ptiNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Allocate thread-private data */
            ptiValue ** blocked_times_mat = (ptiValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            ptiValueVector scratch; // Temporary array
            ptiNewValueVector(&scratch, R, R);

            /* Blocked matrices */
            for(ptiIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                ptiIndex times_mat_index = mats_order[1];
                ptiElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                ptiValue const entry = vals[z];
                for(ptiElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(ptiIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    for(ptiElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *=
blocked_times_mat[times_mat_index][(ptiBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                ptiElementIndex const mode_i = hitsr->einds[mode].data[z];
                for(ptiElementIndex r=0; r<R; ++r) {
                    /* Parallel blocks may share output rows: atomic accumulate. */
                    #pragma omp atomic update
                    blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                }
            }   // End loop entries

            /* Free thread-private space */
            free(blocked_times_mat);
            ptiFreeValueVector(&scratch);
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/* 3-order matrix-tiling specialization with nested tk x tb parallelism.
 * NOTE(review): unlike the generic _MatrixTiling variant above, this function
 * does not call omp_set_nested/omp_set_dynamic itself; it relies on the caller
 * (the dispatching wrapper) having enabled nesting — confirm intended. */
int ptiOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiRankMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk,
    const int tb)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const * const ndims = hitsr->ndims;
    ptiValue const * const restrict vals = hitsr->values.data;
    ptiElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiElementIndex const R = mats[mode]->ncols;
    ptiRankMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    ptiIndex times_mat_index_1 = mats_order[1];
    ptiRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
    #pragma omp parallel for num_threads(tk)
    for(ptiIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        ptiNnzIndex kptr_begin = hitsr->kptr.data[k];
        ptiNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(ptiIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Allocate thread-private data */
            ptiValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            ptiValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            ptiNnzIndex bptr_begin = hitsr->bptr.data[b];
            ptiNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(ptiIndex z=bptr_begin; z<bptr_end; ++z) {
                ptiElementIndex mode_i = hitsr->einds[mode].data[z];
                ptiElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                ptiElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                ptiValue entry = vals[z];

                for(ptiElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    blocked_mvals[(ptiBlockMatrixIndex)mode_i * stride + r] += entry *
                        blocked_times_mat_1[(ptiBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(ptiBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}
a.24.1.c
/* { dg-do compile } */
/* { dg-require-effective-target tls } */

/* GCC testsuite case (OpenMP spec Example A.24): with default(none), every
   variable referenced inside the parallel region must be explicitly scoped
   unless it is threadprivate, const-qualified, region-local, or the loop
   iteration variable.  The dg-error directives below encode the expected
   diagnostics; their trailing numbers are absolute line numbers in the
   original test file and must not be disturbed. */

extern int omp_get_num_threads (void);

int x, y, t, z[1000];
#pragma omp threadprivate(x)

void a24 (int a)
{
  const int c = 1;
  int i = 0;
  int l = 0;
#pragma omp parallel default(none) private(a) shared(z)
  {
    int j = omp_get_num_threads ();
       /* O.K. - j is declared within parallel region */
       /* O.K. - a is listed in private clause */
       /*      - z is listed in shared clause */
    x = c;
       /* O.K. - x is threadprivate */
       /*      - c has const-qualified type */
    z[i] = y;
       /* { dg-error "'i' not specified" "" { target *-*-* } 21 } */
       /* { dg-error "enclosing parallel" "" { target *-*-* } 13 } */
       /* { dg-error "'y' not specified" "" { target *-*-* } 21 } */
#pragma omp for firstprivate(y)
    for (i = 0; i < 10; i++)
      {
        z[i] = y;
           /* O.K. - i is the loop iteration variable */
           /*      - y is listed in firstprivate clause */
      }
    z[l] = t;
       /* { dg-error "'l' not specified" "" { target *-*-* } 31 } */
       /* { dg-error "'t' not specified" "" { target *-*-* } 31 } */
  }
}
ResNet-18_CPU_imagenet.c
/*
Pretrained ResNet-18 Convolutional Neural Network in C language and OpenMP API
GitHUB Page: https://github.com/jcanore/vgg16
Author: Jack/jocare

Compilation: gcc -O3 ResNet-18_CPU_cifar.c -lm -fopenmp -o ResNet-18_CPU_cifar
Usage: ResNet-18_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)>
Example: ResNet-18_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1
*/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include "sparse.h"

/* Elapsed wall-clock seconds between two gettimeofday() samples.
 * NOTE(review): struct timeval needs <sys/time.h>, and memset (used below)
 * needs <string.h>; neither is included here — presumably pulled in via
 * "sparse.h".  Confirm. */
double get_seconds(struct timeval tStart, struct timeval tEnd) {
    return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6;
}

#define SIZE 224        // input image height/width
#define CONV_SIZE 3     // default square kernel size
#define CONV_LEVELS 20  // number of convolution layers
//#define _CRT_SECURE_NO_WARNINGS 1

// precompile variables
// assure default values if nothing provided
#ifndef SPARSE_CONVOLUTIONS
#define SPARSE_CONVOLUTIONS 0 // default dense convolutions
#endif // SPARSE_CONVOLUTIONS

#ifndef FIRST_CONV_SPARSE
#define FIRST_CONV_SPARSE 0 // this is almost never 1
#endif // FIRST_CONV_SPARSE

#ifndef SPARSE_FULLY_CONNECTED
#define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet
#endif // SPARSE_FULLY_CONNECTED

#ifndef FISHER_PRUNING
#define FISHER_PRUNING 0 // set for fisher pruning, all previous variables changed to dense
#endif // FISHER_PRUNING

#ifndef NUMBER_OF_THREADS
#define NUMBER_OF_THREADS 1 // number of threads to run on
//#define NUMBER_OF_THREADS omp_get_num_procs() - 1
#endif // NUMBER_OF_THREADS

/****************************************************************************************************************************/

// Feature-map side length at each of the 20 conv layers.
int im_sizes[20] = { 224, 112, 56, 56, 56, 28, 28, 28, 28, 28, 14, 14, 14, 14, 14, 7, 7, 7, 7, 7};
//int im_sizes[20] = { 224, 224, 224, 224, 224, 112, 112, 112, 112, 56, 56, 56, 56, 56, 28, 28, 28, 28, 28 };

// Weights and image block START
float ***image;

#if FISHER_PRUNING
/* NOTE(review): SPARSE_CONVOLUTIONS is unconditionally defined to a default
 * above, so this re-#define without an intervening #undef triggers a
 * redefinition warning (an error if the user passed -DSPARSE_CONVOLUTIONS=1). */
#define SPARSE_CONVOLUTIONS 0 // force dense convolutions

/*
// ORIGINAL FISHER EXPERIMENTS
int cshape[20][4] = {
	{ 64, 3, CONV_SIZE, CONV_SIZE },
	{ 13, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 13, CONV_SIZE, CONV_SIZE },
	{ 11, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 11, CONV_SIZE, CONV_SIZE },
	{ 31, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 31, CONV_SIZE, CONV_SIZE },
	{ 31, 64, 1, 1 },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 13, 128, CONV_SIZE, CONV_SIZE },
	{ 40, 13, CONV_SIZE, CONV_SIZE },
	{ 256, 40, CONV_SIZE, CONV_SIZE },
	{ 40, 13, 1, 1 },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 19, 256, CONV_SIZE, CONV_SIZE },
	{ 19, 19, CONV_SIZE, CONV_SIZE },
	{ 512, 19, CONV_SIZE, CONV_SIZE },
	{ 19, 19, 1, 1 },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 12, 512, CONV_SIZE, CONV_SIZE }
};

// batch normalization layer shapes
int bshape[20] = { 64, 13, 64, 11, 64, 31, 128, 128, 13, 128, 40, 256, 256, 19, 256, 19, 512, 512, 12, 512 };

// dense layer
int dshape[1][2]= { { 512, 10} };
*/

// FIXED 90% ACCURACY EXPERIMENTS
// Per-layer conv weight shapes: {out_channels, in_channels, kernel_h, kernel_w}.
int cshape[20][4] = {
	{ 64, 3, 3, 3 },
	{ 9, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 9, CONV_SIZE, CONV_SIZE },
	{ 10, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 10, CONV_SIZE, CONV_SIZE },
	{ 23, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 23, CONV_SIZE, CONV_SIZE },
	{ 128, 64, 1, 1 },
	{ 7, 128, CONV_SIZE, CONV_SIZE },
	{ 128, 7, CONV_SIZE, CONV_SIZE },
	{ 30, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 30, CONV_SIZE, CONV_SIZE },
	{ 256, 128, 1, 1 },
	{ 15, 256, CONV_SIZE, CONV_SIZE },
	{ 256, 15, CONV_SIZE, CONV_SIZE },
	{ 15, 256, CONV_SIZE, CONV_SIZE },
	{ 512, 15, CONV_SIZE, CONV_SIZE },
	{ 512, 256, 1, 1 },
	{ 10, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 10, CONV_SIZE, CONV_SIZE }
};

// batch normalization layer shapes
int bshape[20] = { 64, 9, 64, 10, 64, 23, 128, 128, 7, 128, 30, 256, 256, 15, 256, 15, 512, 512, 10, 512 };

// dense layer
int dshape[1][2]= { { 512, 10} };

#else // FISHER_PRUNING

// Unpruned ResNet-18 conv shapes: {out_channels, in_channels, kernel_h, kernel_w};
// the 1x1 entries are the downsampling shortcut convolutions.
int cshape[20][4] = {
	{ 64, 3, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 128, 64, 1, 1 },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 256, 128, 1, 1 },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 512, 256, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 256, 1, 1 },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE }
};

// batch normalization layer shapes
int bshape[CONV_LEVELS] = { 64, 64, 64, 64, 64, 64, 128, 128, 128, 128, 128, 256, 256, 256, 256, 256, 512, 512, 512, 512 };

// dense layer
int dshape[1][2]= { { 512, 1000} };

#endif // FISHER_PRUNING

float *****wc; // weights convolution
float **bc; // biases convolution
float ***wd; // weights dense
float **bd; // biases dense

#if SPARSE_CONVOLUTIONS
// sparse conv
csr_t ****wc_sparse;
#endif // SPARSE_CONVOLUTIONS

float batchnorm_weights[CONV_LEVELS][512];
float batchnorm_biases[CONV_LEVELS][512];

// Blocks for intermediate convolutions
int mem_block_shape[3] = {512, SIZE, SIZE}; // not optimal defining 512 statically
float ***mem_block1;
float ***mem_block2;
float ***shortcut_mem;

// Blocks for dense flatten layers
int mem_block_dense_shape = { 512 * 1 * 1 }; // size of layer before the fully connected
float *mem_block1_dense;
float *mem_block2_dense;

// Weights and image block END

/****************************************************************************************************************************/

/* Zero the full 512 x SIZE x SIZE scratch feature-map block. */
void reset_mem_block(float ***mem) {
    int i, j, k;
    for (i = 0; i < mem_block_shape[0]; i++) {
        for (j = 0; j < mem_block_shape[1]; j++) {
            for (k = 0; k < mem_block_shape[2]; k++) {
                mem[i][j][k] = 0.f;
            }
        }
    }
}

/****************************************************************************************************************************/

/* Zero the flattened dense-layer scratch buffer. */
void reset_mem_block_dense(float *mem) {
    int i;
    for (i = 0; i < mem_block_dense_shape; i++) {
mem[i] = 0.f;
    }
}

/****************************************************************************************************************************/

/* Allocate all global buffers: the input image, conv weights/biases (and the
 * sparse-CSR holders when SPARSE_CONVOLUTIONS), dense weights/biases, and the
 * intermediate feature-map / dense scratch blocks.  Must be called before
 * read_weights()/read_image().
 * NOTE(review): none of the malloc results are checked for NULL. */
void init_memory() {
    int i, j, k, l;

    // Init image memory: image[3][SIZE][SIZE]
    image = malloc(3 * sizeof(float**));
    for (i = 0; i < 3; i++) {
        image[i] = malloc(SIZE * sizeof(float*));
        for (j = 0; j < SIZE; j++) {
            image[i][j] = malloc(SIZE * sizeof(float));
        }
    }

#if SPARSE_CONVOLUTIONS
    // Holders for per-(layer, out, in) CSR kernels, filled in read_weights().
    wc_sparse = (csr_t****) malloc(CONV_LEVELS * sizeof(csr_t***));
    for (l = 0; l < CONV_LEVELS; l++) {
        wc_sparse[l] = (csr_t***) malloc(cshape[l][0] * sizeof(csr_t**));
        for (i = 0; i < cshape[l][0]; i++) {
            wc_sparse[l][i] = (csr_t**) malloc(cshape[l][1] * sizeof(csr_t*));
        }
    }
    // wc memory allocated below will be freed in read_weights if SPARSE_CONVOLUTIONS
#endif // SPARSE_CONVOLUTIONS

    // Init convolution weights: wc[layer][out][in][kh][kw], bc[layer][out]
    wc = malloc(CONV_LEVELS * sizeof(float****));
    bc = malloc(CONV_LEVELS * sizeof(float*));
    for (l = 0; l < CONV_LEVELS; l++) {
        wc[l] = malloc(cshape[l][0] * sizeof(float***));
        for (i = 0; i < cshape[l][0]; i++) {
            wc[l][i] = malloc(cshape[l][1] * sizeof(float**));
            for (j = 0; j < cshape[l][1]; j++) {
                wc[l][i][j] = malloc(cshape[l][2] * sizeof(float*));
                for (k = 0; k < cshape[l][2]; k++) {
                    wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float));
                }
            }
        }
        bc[l] = malloc(cshape[l][0] * sizeof(float));
    }

    // Init dense weights
    // NOTE(review): two slots are allocated for wd/bd but only index 0 is
    // ever populated (dshape has a single layer) — presumably a leftover.
    wd = malloc(2 * sizeof(float**));
    bd = malloc(2 * sizeof(float*));
    for (l = 0; l < 1; l++) {
        wd[l] = malloc(dshape[l][0] * sizeof(float*));
        for (i = 0; i < dshape[l][0]; i++) {
            wd[l][i] = malloc(dshape[l][1] * sizeof(float));
        }
        bd[l] = malloc(dshape[l][1] * sizeof(float));
    }

    // Init mem_blocks
    // this size could be dynamic
    mem_block1 = malloc(mem_block_shape[0] * sizeof(float**));
    mem_block2 = malloc(mem_block_shape[0] * sizeof(float**));
    shortcut_mem = malloc(mem_block_shape[0] * sizeof(float**));
    for (i = 0; i < mem_block_shape[0]; i++) {
        mem_block1[i] = malloc(mem_block_shape[1] * sizeof(float*));
        mem_block2[i] = malloc(mem_block_shape[1] * sizeof(float*));
        shortcut_mem[i] = malloc(mem_block_shape[1] * sizeof(float*));
        for (j = 0; j < mem_block_shape[1]; j++) {
            mem_block1[i][j] = malloc(mem_block_shape[2] * sizeof(float));
            mem_block2[i][j] = malloc(mem_block_shape[2] * sizeof(float));
            shortcut_mem[i][j] = malloc(mem_block_shape[2] * sizeof(float));
        }
    }

    // reset_mem_block(mem_block1);
    // reset_mem_block(mem_block2);

    // Init mem blocks dense (calloc: zero-initialized)
    mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float));
    mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float));

    // Init batchnorm blocks
    //batchnorm_weights = malloc(2 * sizeof(float*));
    //batchnorm_biases = malloc(2 * sizeof(float*));
    //for (int z = 0; z < 20; z++) {
    //batchnorm_weights[z] = malloc(512 * sizeof(float));
    //batchnorm_biases[z] = malloc(512 * sizeof(float));
    //}
}

/****************************************************************************************************************************/

/* Release everything allocated by init_memory().
 * NOTE(review): the dense weights (wd/bd) free loop is commented out below,
 * so those allocations are intentionally(?) leaked at shutdown — confirm. */
void free_memory() {
    int i, j, k, l;

    // Free image memory
    for (i = 0; i < 3; i++) {
        for (j = 0; j < SIZE; j++) {
            free(image[i][j]);
        }
        free(image[i]);
    }
    free(image);

    // Free convolution weights (sparse holders or dense 5-D arrays)
    for (l = 0; l < CONV_LEVELS; l++) {
#if SPARSE_CONVOLUTIONS
        for (i = 0; i < cshape[l][0]; i++) {
            for (j = 0; j < cshape[l][1]; j++) {
                free(wc_sparse[l][i][j]);
            }
            free(wc_sparse[l][i]);
        }
        free(wc_sparse[l]);
#else
        for (i = 0; i < cshape[l][0]; i++) {
            for (j = 0; j < cshape[l][1]; j++) {
                for (k = 0; k < cshape[l][2]; k++) {
                    free(wc[l][i][j][k]);
                }
                free(wc[l][i][j]);
            }
            free(wc[l][i]);
        }
        free(wc[l]);
#endif
        free(bc[l]);
    }
    // free(wc);
    // free(bc);

#if SPARSE_CONVOLUTIONS
    free(wc_sparse);
#else
    free(wc);
#endif // SPARSE_CONVOLUTIONS
    free(bc);

    // Free dense weights
    /*
    for (l = 0; l < 2; l++) {
        for (i = 0; i < dshape[l][0]; i++) {
            free(wd[l][i]);
        }
        free(wd[l]);
        free(bd[l]);
    }
    free(wd);
    free(bd);
    */

    // Free memblocks
    for (i = 0; i < mem_block_shape[0]; i++) {
        for (j = 0; j < mem_block_shape[1]; j++) {
            free(mem_block1[i][j]);
            free(mem_block2[i][j]);
free(shortcut_mem[i][j]); } free(mem_block1[i]); free(mem_block2[i]); free(shortcut_mem[i]); } free(mem_block1); free(mem_block2); free(shortcut_mem); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { /* weights are written out as: - 20 x convolutional weights NO bias - 20 x batchnorm weights with bias - 1 x fc weights with bias */ float dval; int i, j, k, l, z; FILE *iin; int total_lvls_read = 0; // printf("\nin_file es: %s\n\n", in_file); iin = fopen64(in_file, "r"); if (iin == NULL) { printf("Weights file %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (z = 0; z < CONV_LEVELS; z++) { printf("Read conv block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { for (j = 0; j < cshape[z][1]; j++) { for (k = 0; k < cshape[z][2]; k++) { for (l = 0; l < cshape[z][3]; l++) { fscanf(iin, "%f", &dval); wc[z][i][j][k][l] = dval; } } } } total_lvls_read += 1; } /* // run this to check conv weights are correct z = 19; // print back to verify for (i = 0; i < cshape[z][0]; i++) { for (j = 0; j < cshape[z][1]; j++) { for (k = 0; k < cshape[z][2]; k++) { for (l = 0; l < cshape[z][3]; l++) { printf("conv 5: %f \n", wc[z][i][j][k][l]); } } } } return; */ for (z = 0; z < CONV_LEVELS; z++) { // batchnorm weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < bshape[z]; i++) { fscanf(iin, "%f", &dval); //printf("weight %i : %f \n", i, dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < bshape[z]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < 
dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); //printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); bd[z][i] = dval; } } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { //printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]); csr_t* a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); //print_csr(a); wc_sparse[l][i][j] = a; //printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float*****) malloc(1 * sizeof(float****)); wc_first_conv[l] = (float****) malloc(cshape[l][0] * sizeof(float***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float***) malloc(cshape[l][1] * sizeof(float**)); for (j = 0; j < cshape[l][1]; j++) { wc_first_conv[l][i][j] = (float**) malloc(cshape[l][2] * sizeof(float*)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float*) malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // 
SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("Image file %s absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); image[l][i][j] = dval; // printf("i[%d][%d][%d]:%f\n", i, j, l, dval); } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; // float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; float zeropad[size+2][size+2]; memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } // float** zeropad = (float**) malloc((size + 2) * sizeof(float*)); //[size+2][size+2]; // for (i = 0; i < (size + 2); ++i) // zeropad[i] = (float*) malloc ((size + 2) * sizeof(float)); // //memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // // padding with zeros // for (i = 0; i < size + 2; ++i) { // zeropad[i][0] = 0; // zeropad[i][size + 1] = 0; // } // for (i = 1; i < size + 1; ++i) { // zeropad[0][i] = 0; // zeropad[size + 1][i] = 0; // } // // copying input value // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // zeropad[i + 1][j + 1] = matrix[i][j]; // } // } for (i = 0; i < size; i=i+stride) { for (j = 0; j < size; j=j+stride) { sum = zeropad[i ][j ] * kernel[0][0] + zeropad[i ][j + 1] * kernel[0][1] + zeropad[i ][j + 2] * kernel[0][2] + zeropad[i + 1][j ] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 2][j ] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] 
* kernel[2][2]; out[i][j] += sum; } } // for (i = 0; i < (size + 2); ++i) // free(zeropad[i]); // free(zeropad); } /****************************************************************************************************************************/ void convolution_3_x_3_sparse(float **matrix, csr_t* kernel, float **out, int size, int stride) { // printf("sparse\n"); int i, j; // float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; float zeropad[size+2][size+2]; memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } // float** zeropad = (float**) malloc((size + 2) * sizeof(float*)); //[size+2][size+2]; // for (i = 0; i < (size + 2); ++i) // zeropad[i] = (float*) malloc ((size + 2) * sizeof(float)); // //memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // // padding with zeros // for (i = 0; i < size + 2; ++i) { // zeropad[i][0] = 0; // zeropad[i][size + 1] = 0; // } // for (i = 1; i < size + 1; ++i) { // zeropad[0][i] = 0; // zeropad[size + 1][i] = 0; // } // // copying input value // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // zeropad[i + 1][j + 1] = matrix[i][j]; // } // } // // convolution // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // out[i][j] += s_csr_conv(kernel, zeropad, i, j); // } // } // for (i = 0; i < (size + 2); ++i) // free(zeropad[i]); // free(zeropad); int k,l; float sum; // convolution for (i = 0; i < size; i+=stride) { for (j = 0; j < size; j+=stride) { //out[i][j] += s_csr_conv(kernel, zeropad, i, j); sum = 0.f; for (k = 0; k < kernel->nrows; ++k) { // for every nonzero element in this row for (l = kernel->rowptr[k]; l < kernel->rowptr[k + 1]; ++l) { // Scale the corresponding row of B with the nonzero value of A float value = kernel->values[l]; int col = kernel->colind[l]; sum += value * zeropad[i + k][j + col]; } } out[i][j] += sum; } } } 
/****************************************************************************************************************************/
/*
 * 1x1 convolution (dense kernel): out[i][j] += matrix[i][j] * kernel[0][0].
 *
 * BUG FIX: the original reused the 3x3-style zero padding and then read
 * zeropad[i][j] == matrix[i-1][j-1], i.e. it shifted the whole map one
 * pixel down/right and zeroed the first row/column. A 1x1 convolution
 * needs no padding at all, so the input is read directly.
 */
void convolution_1_x_1(float **matrix, float **kernel, float **out, int size) {
    int i, j;
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            out[i][j] += matrix[i][j] * kernel[0][0];
        }
    }
}

/****************************************************************************************************************************/
/*
 * 1x1 convolution with a CSR-compressed kernel. Same shift bug fixed as in
 * the dense version above. A 1x1 CSR kernel holds at most one stored value;
 * an all-zero kernel contributes nothing (identical to the old loop, which
 * simply added 0).
 */
void convolution_1_x_1_sparse(float **matrix, csr_t *kernel, float **out, int size) {
    int i, j;
    /* Extract the single (possibly absent) 1x1 coefficient. */
    float k00 = 0.f;
    if (kernel->nrows > 0 && kernel->rowptr[1] > kernel->rowptr[0]) {
        k00 = kernel->values[kernel->rowptr[0]];
    }
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            out[i][j] += k00 * matrix[i][j];
        }
    }
}

/****************************************************************************************************************************/
// In-place ReLU over a size x size feature map.
// (Named "add_relu" for symmetry with the flatten variant, but this
// network's conv layers carry no bias — only the clamp happens here.)
void add_relu(float **out, int size) {
    int i, j;
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            if (out[i][j] < 0)
                out[i][j] = 0.f;
        }
    }
}

/****************************************************************************************************************************/
// out[i] += bs[i] for a flattened vector, then optional ReLU (relu == 1).
void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) {
    int i;
    for (i = 0; i < size; i++) {
        out[i] += bs[i];
        // printf("%f\n", out[i]);
        if (relu == 1) {
            if (out[i] < 0)
                out[i] = 0.f;
        }
    }
}

/****************************************************************************************************************************/
// Copies a (sh0, sh1, sh2) tensor into a flat vector, C (row-major) order.
void flatten(float ***in, float *out, int sh0, int sh1, int sh2) {
    int i, j, k, total = 0;
    for (i = 0; i < sh0; i++) {
        for (j = 0; j < sh1; j++) {
            for (k = 0; k < sh2; k++) {
                out[total] = in[i][j][k];
                total++;
            }
        }
    }
}

/****************************************************************************************************************************/
// Fully-connected layer: out = in x weights, with weights[sh_in][sh_out].
// Bias and activation are applied separately (add_bias_and_relu_flatten).
void dense(float *in, float **weights, float *out, int sh_in, int sh_out) {
    int i, j;
    //#pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (i = 0; i < sh_out; i++) {
        float sum = 0.0;
        for (j = 0; j < sh_in; j++) {
            sum += in[j] * weights[j][i];
        }
        out[i] = sum;
    }
}

/****************************************************************************************************************************/
// Inference-time batchnorm with the statistics folded into a per-channel
// affine transform: out = in * weights[c] + bias[c].
// BUG FIX: the pragma used to declare only `j` private, leaving `i` shared
// across threads — a data race on the middle loop counter (the sibling
// batchnorm_and_relu below already used private(i,j) correctly).
void batchnorm(float ***in, float ***out, float *weights, float *bias, int num_channels, int im_size) {
    int channel, i, j;
#pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (channel = 0; channel < num_channels; channel++) {
        for (i = 0; i < im_size; i++) {
            for (j = 0; j < im_size; j++) {
                out[channel][i][j] = in[channel][i][j] * weights[channel] + bias[channel];
            }
        }
    }
}

/****************************************************************************************************************************/
// Fused batchnorm (folded affine form, as above) + ReLU.
void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, int num_channels, int im_size) {
    int channel, i, j;
#pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (channel = 0; channel < num_channels; channel++) {
        for (i = 0; i < im_size; i++) {
            for (j = 0; j < im_size; j++) {
                out[channel][i][j] = in[channel][i][j] * weights[channel] + bias[channel];
                if (out[channel][i][j] < 0.f)
                    out[channel][i][j] = 0.f;
            }
        }
    }
}
/****************************************************************************************************************************/ float avg_of(float **in, int start_x, int start_y, int kernel_size) { float sum = 0.; int i, j; for(i = 0; i < kernel_size; ++i) { for(j = 0; j < kernel_size; ++j) { sum += in[start_x+i][start_y+j]; } } return sum / (kernel_size * kernel_size); } /****************************************************************************************************************************/ void avg_pool(float ***in, float ***out, int channels, int k_size, int image_size) { int c; for(c = 0; c < channels; ++c) { out[c][0][0] = avg_of(in[c],0,0, 7); //out[c][0][1] = avg_of(in[c],0,6, 7); //out[c][1][0] = avg_of(in[c],6,0, 7); //out[c][1][1] = avg_of(in[c],6,6, 7); } } /****************************************************************************************************************************/ void dump_image() { int i, j, k; for (i = 0; i < 3; i++) { for (j = 0; j < SIZE; j++) { for (k = 0; k < SIZE; k++) { printf("%.12lf\n", image[i][j][k]); } } } } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c=0; if (only_convolution == 1) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g ", mem_block1_dense[i]); } } else { double maximum=-1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g ", mem_block2_dense[i]); if(mem_block1_dense[i]>maximum){ maximum=mem_block2_dense[i]; c=i+1; } } //fprintf(out, "\n"); printf("-------------------------\n"); printf("This image depicts class: %d\n",c); } } /****************************************************************************************************************************/ void conv_norm_block(int level, int shortcut) { int in_planes = cshape[level][1]; int i, j, k; // if shortcut then save image for layer 
    // Snapshot the current activations so the projection shortcut can be
    // applied to the block's input (not its conv output).
    if(shortcut==1) {
        int i, j, k;   // NOTE: shadows the outer i,j,k declared above
        for (i = 0; i < mem_block_shape[0]; i++) {
            for (j = 0; j < mem_block_shape[1]; j++) {
                for (k = 0; k < mem_block_shape[2]; k++) {
                    shortcut_mem[i][j][k] = mem_block1[i][j][k];
                }
            }
        }
    }

    //int in_planes = cshape[level][0]
    int out_planes = cshape[level][0];
    // NOTE(review): stride is always 1 here; standard ResNet-18 downsampling
    // blocks use stride 2 — spatial-size changes appear to be expected to
    // come from im_sizes[level] instead. TODO confirm against the trainer.
    int stride = 1;

    //-------------------------------------------------------------------------------------------------------------------------------
    // conv 1 (input in mem_block1, accumulated into mem_block2)
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (i = 0; i < out_planes; i++) {
        for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
            convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], stride);
#else
            convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], im_sizes[level], stride);
#endif // SPARSE_CONVOLUTIONS
        }
    }

    batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // batchnorm(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // for(i = 0; i < out_planes; i++) {
    //     add_relu(mem_block1[i], im_sizes[level]);
    // }
    reset_mem_block(mem_block2);

    //-------------------------------------------------------------------------------------------------------------------------------
    // conv 2
    level += 1;
    in_planes = cshape[level][1];
    out_planes = cshape[level][0];
    //-------------------------------------------------------------------------------------------------------------------------------
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (i = 0; i < out_planes; i++) {
        for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
            convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], stride);
#else
            convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], im_sizes[level], stride);
#endif // SPARSE_CONVOLUTIONS
        }
    }

    // NOTE(review): canonical ResNet applies ReLU AFTER the shortcut
    // addition; here it is applied before. Presumably matched by the
    // training-side export — verify before changing.
    batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // batchnorm(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // for(i = 0; i < out_planes; i++) {
    //     add_relu(mem_block1[i], im_sizes[level]);
    // }
    reset_mem_block(mem_block2);

    // if shortcut: conv bn + out
    // Projection shortcut: 1x1 conv + batchnorm on the saved block input,
    // then element-wise addition into the main path.
    if(shortcut==1) {
        level += 1;
        in_planes = cshape[level][1];
        out_planes = cshape[level][0];

        for (i = 0; i < out_planes; i++) {
            for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
                convolution_1_x_1_sparse(shortcut_mem[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level]);
#else
                convolution_1_x_1(shortcut_mem[j], wc[level][i][j], mem_block2[i], im_sizes[level]);
#endif // SPARSE_CONVOLUTIONS
            }
        }

        batchnorm_and_relu(mem_block2, shortcut_mem, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
        // batchnorm(mem_block2, shortcut_mem, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
        // for(i = 0; i < out_planes; i++) {
        //     add_relu(shortcut_mem[i], im_sizes[level]);
        // }
        reset_mem_block(mem_block2);

        // add results (no ReLU after the addition — see note above)
        for(i = 0; i < out_planes; i++) {
            for(j = 0; j < im_sizes[level]; j++) {
                for(k = 0; k < im_sizes[level]; k++) {
                    mem_block1[i][j][k] = mem_block1[i][j][k] + shortcut_mem[i][j][k];
                }
            }
        }
    }
}

/****************************************************************************************************************************/
// Runs the full ResNet-18 forward pass for the image currently in `image`,
// leaving the class scores in mem_block2_dense.
void get_resnet18_predict(FILE *out, int only_convolution) {
    int i, j, k;
    int level = 0;

    // Init intermediate memory
    reset_mem_block(mem_block1);
    reset_mem_block(mem_block2);
    reset_mem_block_dense(mem_block1_dense);
    reset_mem_block_dense(mem_block2_dense);

    //-------------------------------------------------------------------------------------------------------------------------------
    // Layer 1 (Convolution 3 -> 64)
    //add_relu(mem_block2[i], 32); ///???? WHY DO WE NEED THIS HERE?
    // print the image
    /*
    for (i = 0; i < 32; i++) {
    for (j = 0; j < 32; j++) {
    for (k = 0; k < 3; k++) {
    printf("%f \n", image[k][i][j]);
    }
    }
    }
    return;
    */

    int counter = 0;
    // Stem convolution: 3 input channels -> cshape[0][0] output channels.
    for (i = 0; i < cshape[level][0]; i++) {
        for (j = 0; j < cshape[level][1]; j++) {
#if FIRST_CONV_SPARSE
            // NOTE(review): `stride` is not declared in this function — this
            // branch will not compile if FIRST_CONV_SPARSE is enabled.
            convolution_3_x_3_sparse(image[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], stride);
#else
            convolution_3_x_3(image[j], wc[level][i][j], mem_block2[i], im_sizes[level], 1);
#endif // FIRST_CONV_SPARSE
        }
        // [print content of mem block]
        /*
        for(int m = 0; m < 32; m++) {
        for(int n = 0; n < 32; n++) {
        printf("%i: %f\n", counter, mem_block1[i][m][n]);
        counter++;
        }
        }
        */
        //relu(mem_block2[i], 32);
    }

    batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], 64, 32);
    // batchnorm(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], 64, 32);
    // for(i = 0; i < cshape[level][0]; i++) {
    //     add_relu(mem_block1[i], 32);
    // }
    reset_mem_block(mem_block2);

    /*
    counter = 0;
    // print mem block 2:
    for (i = 0; i < 64; i++) {
    for (j = 0; j < 32; j++) {
    for (k = 0; k < 32; k++) {
    counter++;
    if (counter < 100) {
    printf("%i: %f\n",counter, mem_block2[i][j][k]);
    }
    }
    }
    }
    return;
    */

    level++;
    //-------------------------------------------------------------------------------------------------------------------------------
    // Residual stages. Shortcut blocks consume 3 weight levels
    // (conv1, conv2, 1x1 projection); plain blocks consume 2.
    int shortcut = 1;
    int no_shortcut = 0;

    // 2 blocks of 64
    conv_norm_block(level, no_shortcut);
    level+=2;
    conv_norm_block(level, no_shortcut);
    level+=2;

    // 2 blocks of 128
    conv_norm_block(level, shortcut);
    level+=3;
    conv_norm_block(level, no_shortcut);
    level+=2;

    // 2 blocks of 256
    conv_norm_block(level, shortcut);
    level+=3;
    conv_norm_block(level, no_shortcut);
    level+=2;

    // 2 blocks of 512
    conv_norm_block(level, shortcut);
    level+=3;
    conv_norm_block(level, no_shortcut);
    level+=2;

    level = level - 1;   // point back at the last conv level for cshape below

    // average pool 7 with 1024 channels of 7x7 images
    avg_pool(mem_block1, mem_block2, 512, 7, 7);

    // flatten the (channels, 1, 1) pooled tensor into the dense buffer
    flatten(mem_block2, mem_block1_dense, cshape[level][0], 1, 1);

    // dense
    level = 0;
    dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]);
    // NOTE(review): relu == 1 applies ReLU to the final class scores,
    // clamping negative logits to 0 — confirm this matches the trainer.
    add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 1);
    reset_mem_block_dense(mem_block1_dense);

    return;
}

/****************************************************************************************************************************/
// Strips leading/trailing whitespace in place; returns a pointer into `str`
// (the original pointer may no longer be the start of the trimmed string).
char *trimwhitespace(char *str){
    char *end;

    // Trim leading space
    while (isspace((unsigned char)*str)) str++;

    if (*str == 0) // All spaces?
        return str;

    // Trim trailing space
    end = str + strlen(str) - 1;
    while (end > str && isspace((unsigned char)*end)) end--;

    // Write new null terminator
    *(end + 1) = 0;

    return str;
}

/****************************************************************************************************************************/
// Usage: <program> <weights file> <images list file> <output file> [only_convolution]
int main(int argc, char *argv[]) {
    FILE *file_list, *results;
    char buf[1024];
    struct timeval tStart, tEnd;
    double deltaTime;
    char *weights_file;
    char *image_list_file;
    char *output_file;
    int lvls = -1;
    int only_convolution = 0;

    //-----------------------------------------------------------------------
    printf("Using %d threads\n", NUMBER_OF_THREADS);

    if (argc != 4 && argc != 5) {
        printf("Usage: <program.exe> <weights file> <images list file> <output file> <only_convolution [optional]>\n");
        return 0;
    }
    weights_file = argv[1];
    //printf("%s\n", weights_file);
    image_list_file = argv[2];
    output_file = argv[3];
    if (argc == 5) {
        // Any 5th argument switches to conv-only mode (weights after the
        // conv levels are not read, raw activations are dumped).
        lvls = CONV_LEVELS;
        only_convolution = 1;
    }

    //-----------------------------------------------------------------------
    init_memory();

    file_list = fopen(image_list_file, "r");
    if (file_list == NULL) {
        printf("Check file list location: %s\n", image_list_file);
        return 1;
    }
    results = fopen(output_file, "w");
    if (results == NULL) {
        printf("Couldn't open file for writing: %s\n", output_file);
        return 1;
    }

    gettimeofday(&tStart, NULL);
    read_weights(weights_file, lvls);
    gettimeofday(&tEnd, NULL);
    deltaTime = get_seconds(tStart, tEnd);
printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { fgets(buf, 512, file_list); if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); // normalize_image(); // dump_image(); gettimeofday(&tStart, NULL); // get_resnet18_predict(only_convolution); get_resnet18_predict(results, only_convolution); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); // output_predictions(results, only_convolution); output_predictions(results, only_convolution, 512, 1); } //free_memory(); fclose(file_list); return 0; }
linramp.c
#include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>

// Element accessor: walks the array with its byte stride and reinterprets
// each element as a C double. Assumes a 1-D float64 array — no dtype or
// dimensionality checking is done (TODO confirm callers guarantee this).
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))

static PyObject *linramp(PyObject *self, PyObject *args, PyObject *keywds);

// linramp(rampparams, x, etc=None) -> a*(x - x0) + b, elementwise.
// rampparams holds [a, b, x0]; x is the array of time/phase points.
static PyObject *linramp(PyObject *self, PyObject *args, PyObject *keywds)
{
    PyObject *etc;
    PyArrayObject *x,*y, *rampparams;
    double a,b,x0;
    int i;   // NOTE(review): int vs npy_intp dims[0] — overflows past INT_MAX elements
    npy_intp dims[1];

    static char *kwlist[] = {"rampparams","x","etc",NULL};
    // etc = PyList_New(0);

    if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
    {
        return NULL;
    }

    // Unpack the three ramp parameters.
    a = IND(rampparams,0);
    b = IND(rampparams,1);
    x0 = IND(rampparams,2);

    dims[0] = x->dimensions[0];

    // NOTE(review): PyArray_DOUBLE is the legacy NumPy 1.x name (NPY_DOUBLE
    // in the current API), and the allocation result is not NULL-checked.
    y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);

    // Independent per-element evaluation — safe to parallelize.
    #pragma omp parallel for
    for(i=0;i<dims[0];i++)
    {
        IND(y,i) = a*(IND(x,i)-x0)+b;
    }

    return PyArray_Return(y);
}

static char module_docstring[] = "\
This function creates a model that fits a ramp using a linear polynomial.\n\
\n\
Parameters\n\
----------\n\
x0: time offset\n\
a: coefficient of first term\n\
b: constant\n\
x: Array of time/phase points\n\
\n\
Returns\n\
-------\n\
This function returns the flux values for the ramp models \n\
\n\
Revisions\n\
---------\n\
2008-07-07 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2010-12-25 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to C\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";

static PyMethodDef module_methods[] = {
    {"linramp",(PyCFunction)linramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};

// Module initialisation: PyInit_linramp for Python 3, initlinramp for
// Python 2 — selected at compile time below.
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_linramp(void)
#else
initlinramp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
    PyObject *module;
    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT,
        "linramp",             /* m_name */
        module_docstring,      /* m_doc */
        -1,                    /* m_size */
        module_methods,        /* m_methods */
        NULL,                  /* m_reload */
        NULL,                  /* m_traverse */
        NULL,                  /* m_clear */
        NULL,                  /* m_free */
    };
#endif

#if PY_MAJOR_VERSION >= 3
    module = PyModule_Create(&moduledef);
    if (!module)
        return NULL;
    /* Load `numpy` functionality. */
    import_array();
    return module;
#else
    PyObject *m = Py_InitModule3("linramp", module_methods, module_docstring);
    if (m == NULL)
        return;
    /* Load `numpy` functionality. */
    import_array();
#endif
}
O2VertIntegration.c
#include <mpi.h>
#include "grid.h"

// 3-D temporary field, indexed (block, height, cell) via data_pointer.p3.
extern struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *gv_temp;
// 2-D vertical-integral result, indexed (block, cell) via data_pointer.p2.
extern struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *gv_vi;

// Accumulates gv_temp over the height dimension into gv_vi for the blocks
// handled by this MPI rank. The min/max expressions below are generated
// index arithmetic (left untouched): with
//   blocks_per_rank = ceil(cBlkCnt / mpi_world_size)
// they yield this rank's local block range — min_block is 0 (presumably
// rank-local indexing; non-zero only via the rank-0 modulo special case),
// and max_block is the number of blocks assigned to this rank (0 when the
// rank lies outside the distributed range). TODO confirm against the
// generator's distribution scheme.
void O2VertIntegration(GRID * g)
{
    {
        size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
        size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
        // Blocks are independent — parallelize the outer loop; each
        // (block, cell) output slot is touched by exactly one thread.
#pragma omp parallel for
        for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
            for (size_t height_index = (0); height_index < (g->height); height_index++) {
                for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
                    // Sum every vertical level into the 2-D integral.
                    gv_vi->data_pointer.p2[(block_index)][(cell_index)] += gv_temp->data_pointer.p3[(block_index)][(height_index)][(cell_index)];
                }
            }
        }
    }
}
GB_unop__identity_uint8_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_uint8_fp32)
// op(A') function: GB (_unop_tran__identity_uint8_fp32)

// C type:   uint8_t
// A type:   float
// cast:     uint8_t cij = GB_cast_to_uint8_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each float entry of A to uint8_t into Cx. Cx and Ax may alias.
// Ab is A->b when A is bitmap (entries with Ab[p]==0 are skipped), or NULL
// for the full case. Work is split statically across `nthreads`.
GrB_Info GB (_unop_apply__identity_uint8_fp32)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose kernel is pulled in via GB_unop_transpose.c, which
// expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint8_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mixed_tentusscher_myo_epi_2004_S2_3.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_3.h"

// Report the model's resting potential and number of state variables to the framework.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Set the initial state vector for one cell. The per-cell mask in extra_data
// selects myocardium (0) or epicardium (nonzero) steady-state initial conditions.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;   // V;  millivolt
        sv[1] = 0.f;         //M
        sv[2] = 0.75;        //H
        sv[3] = 0.75f;       //J
        sv[4] = 0.f;         //Xr1
        sv[5] = 1.f;         //Xr2
        sv[6] = 0.f;         //Xs
        sv[7] = 1.f;         //S
        sv[8] = 0.f;         //R
        sv[9] = 0.f;         //D
        sv[10] = 1.f;        //F
        sv[11] = 1.f;        //FCa
        sv[12] = 1.f;        //G
        sv[13] = 0.0002;     //Cai
        sv[14] = 0.2f;       //CaSR
        sv[15] = 11.6f;      //Nai
        sv[16] = 138.3f;     //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;   // V;  millivolt
        sv[1] = 0.f;         //M
        sv[2] = 0.75;        //H
        sv[3] = 0.75f;       //J
        sv[4] = 0.f;         //Xr1
        sv[5] = 1.f;         //Xr2
        sv[6] = 0.f;         //Xs
        sv[7] = 1.f;         //S
        sv[8] = 0.f;         //R
        sv[9] = 0.f;         //D
        sv[10] = 1.f;        //F
        sv[11] = 1.f;        //FCa
        sv[12] = 1.f;        //G
        sv[13] = 0.0002;     //Cai
        sv[14] = 0.2f;       //CaSR
        sv[15] = 11.6f;      //Nai
        sv[16] = 138.3f;     //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.6407442866583,0.00127024730863006,0.781477837871060,0.781226285372551,0.000173058844459830,0.485844316142820,0.00292517461971129,0.999998371825952,1.91031873007277e-08,1.87288135192733e-05,0.999773522474666,1.00766286802375,0.999999451356628,3.16576129409975e-05,0.737961690357158,10.2441215797546,139.210514590526};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance all requested cells by num_steps explicit-Euler steps of size dt,
// dispatching each cell to the myocardium or epicardium right-hand side
// according to the mask in extra_data.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // NOTE(review): the mask is indexed with the loop index i here, but
            // set_model_initial_conditions_cpu indexes it with sv_id. The two only
            // agree when cells_to_solve is NULL (sv_id == i) — confirm which
            // indexing the mask function intends when a subset of cells is solved.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One explicit time step for a myocardial cell: rDY holds the UPDATED state
// (the Euler/Rush-Larsen update is folded into RHS_cpu_myo), so it is copied
// straight back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardium cell model: computes the new state after one
// step of size dt (gates via exponential Rush-Larsen updates, concentrations
// and voltage via forward Euler). Units follow the original publication.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Currents, fluxes and intermediate gate quantities
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak;
    real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent;
    real fcaold; real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF;
    real FCa_INF; real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;

    //Needed to compute currents (Nernst/reversal potentials and rectifiers)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // Analytic solution of the SR buffering quadratic
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    // Analytic solution of the cytosolic buffering quadratic
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential / Rush-Larsen updates)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // FCa and G gates may only move with the voltage condition below
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler; rDY_ holds the NEW state, not derivatives)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One explicit time step for an epicardial cell; same convention as the
// myocardium solver: RHS_cpu_epi returns the updated state in rDY.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium cell model. Identical in structure to
// RHS_cpu_myo, but several conductances are overridden by the fitted
// 'parameters' array below (Scenario 2 calibration).
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Scenario-2 fitted parameter set; overrides the defaults above.
    real parameters []={14.5383636643555,0.000359007183612285,0.000154135859579797,0.000217532604523131,0.265156052763393,0.186639850277223,0.149365610424309,3.43320580539409,0.0166941723782826,1.45123160724562,1094.13527370174,0.000494385096732911,0.269171393030809,0.0183256017779276,0.00468024174172971,1.50869252254344e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Currents, fluxes and intermediate gate quantities
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak;
    real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent;
    real fcaold; real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF;
    real FCa_INF; real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;

    //Needed to compute currents (Nernst/reversal potentials and rectifiers)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release uses the fitted arel/crel instead of the fixed myocardium values
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // Analytic solution of the SR buffering quadratic
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    // Analytic solution of the cytosolic buffering quadratic
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential / Rush-Larsen updates)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // FCa and G gates may only move with the voltage condition below
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler; rDY_ holds the NEW state, not derivatives)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
openMP_for_par.c
#ifdef _OPENMP #include <omp.h> #endif #include <cover_functions.h> unsigned short int nb_thread; unsigned long long nb_sol = 0; void solve_OMP(const struct instance_t * instance, struct context_t ** ctxs) { int i, chosen_item; struct sparse_array_t * active_options; chosen_item = choose_next_item(ctxs[0]); active_options = ctxs[0]->active_options[chosen_item]; #pragma omp parallel for schedule(static, 1) for (i = 0; i < nb_thread; ++i) { cover(instance, ctxs[i], chosen_item); ctxs[i]->num_children[0] = active_options->len; } #pragma omp parallel for schedule(dynamic) for (i = 0; i < active_options->len; ++i) { unsigned short int my_thread = omp_get_thread_num(); int option = active_options->p[i]; ctxs[my_thread]->child_num[ctxs[my_thread]->level] = i; choose_option(instance, ctxs[my_thread], option, chosen_item); solve(instance, ctxs[my_thread]); if (ctxs[my_thread]->solutions >= max_solutions) exit(0); unchoose_option(instance, ctxs[my_thread], option, chosen_item); } #pragma omp parallel for schedule(static, 1) reduction(+:nb_sol) for (i = 0; i < nb_thread; ++i) { uncover(instance, ctxs[i], chosen_item); /* backtrack */ nb_sol += ctxs[i]->solutions; } } int main(int argc, char **argv) { option_setup(argc, argv); nb_thread = omp_get_max_threads(); struct instance_t * instance = load_matrix(in_filename); struct context_t ** ctxs = (struct context_t **) malloc(nb_thread * sizeof(struct context_t *)); #pragma omp parallel for schedule(static, 1) for (unsigned short int i = 0; i < nb_thread; ++i) { ctxs[i] = backtracking_setup(instance); ctxs[i]->nodes = 1; } start = wtime(); solve_OMP(instance, ctxs); printf("FINI. Trouvé %lld solutions en %.2fs\n", nb_sol, wtime() - start); exit(EXIT_SUCCESS); }
omp_dotprod_openmp.c
/*****************************************************************************
 * FILE: omp_dotprod_openmp.c
 * DESCRIPTION:
 *   This simple program is the OpenMP version of a dot product and the
 *   second of four codes used to show the progression from a serial program
 *   to a hybrid MPI/OpenMP program. The relevant codes are:
 *      - omp_dotprod_serial.c  - Serial version
 *      - omp_dotprod_openmp.c  - OpenMP only version
 *      - omp_dotprod_mpi.c     - MPI only version
 *      - omp_dotprod_hybrid.c  - Hybrid MPI and OpenMP version
 * SOURCE: Blaise Barney
 * LAST REVISED: 06/02/17 Blaise Barney
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* Per-thread share of the vector length, and the number of OpenMP threads. */
#define VECLEN 100
#define NUMTHREADS 8

int main (int argc, char* argv[])
{
    const int nthreads = NUMTHREADS;
    const int total = VECLEN * nthreads;  /* overall length of each vector */
    int idx, thread_id;
    double *vec_a, *vec_b;
    double sum, partial;

    printf("Starting omp_dotprod_openmp. Using %d threads\n", nthreads);

    /* Allocate storage for the two dot-product vectors. */
    vec_a = (double*) malloc(total * sizeof(double));
    vec_b = (double*) malloc(total * sizeof(double));

    /* Fill both vectors with ones, so the expected result equals 'total'. */
    for (idx = 0; idx < total; idx++) {
        vec_a[idx] = 1.0;
        vec_b[idx] = vec_a[idx];
    }

    /* Global accumulator for the reduction. */
    sum = 0.0;

    /* Each thread accumulates into its private reduction copy of 'sum';
     * 'partial' snapshots that private copy so each thread can report the
     * share of the product it computed. */
    #pragma omp parallel private(idx, thread_id, partial) num_threads(nthreads)
    {
        partial = 0.0;
        thread_id = omp_get_thread_num();

        #pragma omp for reduction(+:sum)
        for (idx = 0; idx < total; idx++) {
            sum += (vec_a[idx] * vec_b[idx]);
            partial = sum;
        }
        printf("Thread %d partial sum = %f\n", thread_id, partial);
    }

    printf("Done. OpenMP version: sum = %f \n", sum);

    free(vec_a);
    free(vec_b);
    return 0;
}
IF97_Region2_met.c
// Copyright Martin Lord 2014-2014. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // IAPWS-IF97 Region 2: metastable vapour region <= 10 MPa equations /* ********************************************************************* * ******* VALIDITY ************ * * Valid in the metastable vapour region from the saturated vapour line * to the 5% equilibrium moisture line (determined from the equilibrium * h' and h'' values calculated at the given pressure) for pressures * from the triple poiint to 10 MPa * 611.657 Pa <= p <= 10 MPa * * Note: for temperatures between 273.15 and 273.16 K, the part of the * range of validity between the pressures on the saturation pressure * line (Eq 30) and on the sublimation line corresponds to metastable * states * ****************************************************************** */ /* ******************************************************************** * COMPILE AND LINK INSTRUCTIONS (gcc) * * * This library uses math.h, so must have the -lm link flag * * The library is programmed to be able to use OpenMP multithreading * use the -fopenmp complie flag to enable multithreadded code * * ***************************************************************** */ #include "IF97_common.h" //PSTAR TSTAR & sqr #include "IF97_Region2_met.h" #include <math.h> // for pow, log /* #ifdef _OPENMP // multithreading via libgomp # include <omp.h> #endif */ #include <stdio.h> //used for debugging only //*************************************************************** //****** REGION 2 GIBBS FREE ENERGY AND DERIVATIVES************** typedef struct sctGibbsCoeff_o { int Ji; double ni; } typR2coeff_o; // see Table 10 const typR2coeff_o GIBBS_COEFFS_R2MET_O[] = { {0, 0.0} //0 i starts at 1, so 0th i is not used ,{ 0 , -9.6937268393049} // 1 Different from r2 ,{ 1 , 10.087275970006 } // 2 Different from r2 ,{ -5 , -0.0056087911283 } ,{ -4 , 0.0714527380815 } 
,{ -3 , -0.4071049822393 } ,{ -2 , 1.4240819171444 } ,{ -1 , -4.383951131945 } ,{ 2 , -0.2840863246077 } ,{ 3 , 0.0212684637533 } //9 }; const int MAX_GIBBS_COEFFS_R2MET_O = 9; // ideal gas part of dimensionless gibbs free energy in Region2 : See Equation 16 // checked OK double if97_r2met_Gamma_o (double if97_pi, double if97_tau) { int i; double dblGammaSum = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_O; i++) { dblGammaSum += GIBBS_COEFFS_R2MET_O[i].ni * pow( if97_tau, GIBBS_COEFFS_R2MET_O[i].Ji); } return log(if97_pi) + dblGammaSum; } typedef struct sctGibbsCoeff_r { int Ii; int Ji; double ni; } typR2coeff_r; // See table 16 const typR2coeff_r GIBBS_COEFFS_R2MET_R[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{1, 0, -0.73362260186506E-2} ,{1, 2, -0.88223831943146E-1} ,{1, 5, -0.72334555213245E-1} ,{1, 11, -0.40813178534455E-2} ,{2, 1, 0.20097803380207E-2} ,{2, 7, -0.53045921898642E-1} ,{2, 16, -0.76190409086970E-2} ,{3, 4, -0.63498037657313E-2} ,{3, 16, -0.86043093028588E-1} ,{4, 7, 0.75321581522770E-2} ,{4, 10, -0.79238375446139E-2} ,{5, 9, -0.22888160778447E-3} ,{5, 10, -0.26456501482810E-2} //13 }; const int MAX_GIBBS_COEFFS_R2MET_R = 13; // residual part of dimensionless gibbs free energy in Region2 : See Equation 17 // Checked OK double if97_r2met_Gamma_r (double if97_pi, double if97_tau) { int i; double dblGammaSum = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_R; i++) { dblGammaSum += GIBBS_COEFFS_R2MET_R[i].ni * pow(if97_pi, GIBBS_COEFFS_R2MET_R[i].Ii)* pow((if97_tau - 0.5), GIBBS_COEFFS_R2MET_R[i].Ji) ; } return dblGammaSum; } // dimensionless gibbs free energy in Region 2 = g/RT: The fundamental equation of region 2. 
See Equation 15 double if97_r2met_Gamma (double if97_pi, double if97_tau) { return if97_r2met_Gamma_o (if97_pi, if97_tau) + if97_r2met_Gamma_r (if97_pi, if97_tau); } // [d gamma_o / d pi] keeping tau constant double if97_r2met_GammaPi_o (double if97_pi) { double GammaPi_o = 1.0 / if97_pi; return GammaPi_o; } // [d squared gamma_o / d pi squared] keeping tau constant double if97_r2met_GammaPiPi_o (double if97_pi) { return -1.0 / sqr (if97_pi); } // [d gamma_o / d tau] keeping pi constant // Checked OK double if97_r2met_GammaTau_o (double if97_tau) { int i; double dblGammaSum = 0.0; double chunk = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_O; i++) { chunk = GIBBS_COEFFS_R2MET_O[i].ni * GIBBS_COEFFS_R2MET_O[i].Ji * pow(if97_tau, ( GIBBS_COEFFS_R2MET_O[i].Ji - 1.0)); dblGammaSum += chunk; //printf ("i\t%d\tchunk\t%e\tni\t%e\n", i, chunk, GIBBS_COEFFS_R2MET_O[i].ni); } //printf ("gammatautauo\t%.8e\t\n", dblGammaSum ); return dblGammaSum; } // [d squared gamma_o / d tau squared] keeping pi constant double if97_r2met_GammaTauTau_o (double if97_tau) { int i; double dblGammaSum = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_O; i++) { dblGammaSum += GIBBS_COEFFS_R2MET_O[i].ni * GIBBS_COEFFS_R2MET_O[i].Ji * ( GIBBS_COEFFS_R2MET_O[i].Ji - 1.0) * pow(if97_tau, ( GIBBS_COEFFS_R2MET_O[i].Ji - 2.0)); } return dblGammaSum; } // [d squared gamma_o / d pi d tau] const double if97_r2met_GammaPiTau_o = 0.0; // [d gamma_r / d pi] keeping tau constant // Checked OK double if97_r2met_GammaPi_r (double if97_pi, double if97_tau) { int i; double dblGammaSum = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_R; i++) { dblGammaSum += GIBBS_COEFFS_R2MET_R[i].ni * GIBBS_COEFFS_R2MET_R[i].Ii * pow( if97_pi, (GIBBS_COEFFS_R2MET_R[i].Ii - 1.0)) * pow((if97_tau - 0.5), 
GIBBS_COEFFS_R2MET_R[i].Ji); } return dblGammaSum; } // [d squared gamma_r / d pi squared] keeping tau constant double if97_r2met_GammaPiPi_r (double if97_pi, double if97_tau) { int i; double dblGammaSum = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_R; i++) { dblGammaSum += GIBBS_COEFFS_R2MET_R[i].ni * GIBBS_COEFFS_R2MET_R[i].Ii * (GIBBS_COEFFS_R2MET_R[i].Ii - 1.0) * pow(if97_pi, (GIBBS_COEFFS_R2MET_R[i].Ii - 2.0)) * pow((if97_tau - 0.5), GIBBS_COEFFS_R2MET_R[i].Ji); } return dblGammaSum; } // [d gamma_r / d tau] keeping pi constant // Checked OK double if97_r2met_GammaTau_r (double if97_pi, double if97_tau) { int i; double dblGammaSum = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_R; i++) { dblGammaSum += GIBBS_COEFFS_R2MET_R[i].ni * pow( if97_pi, GIBBS_COEFFS_R2MET_R[i].Ii) * GIBBS_COEFFS_R2MET_R[i].Ji * pow((if97_tau - 0.5), (GIBBS_COEFFS_R2MET_R[i].Ji - 1.0)); } return dblGammaSum; } // [d squared gamma_r / d tau squared] keeping pi constant // Checked OK double if97_r2met_GammaTauTau_r (double if97_pi, double if97_tau) { int i; double dblGammaSum = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_R; i++) { dblGammaSum += GIBBS_COEFFS_R2MET_R[i].ni * pow( if97_pi, GIBBS_COEFFS_R2MET_R[i].Ii) * GIBBS_COEFFS_R2MET_R[i].Ji * (GIBBS_COEFFS_R2MET_R[i].Ji - 1.0) * pow((if97_tau - 0.5), (GIBBS_COEFFS_R2MET_R[i].Ji - 2.0)); } return dblGammaSum; } // [d squared gamma_r / d tau squared] keeping pi constant double if97_r2met_GammaPiTau_r (double if97_pi, double if97_tau) { int i; double dblGammaSum = 0.0; #pragma omp parallel for reduction(+:dblGammaSum) //handle loop multithreaded for (i=1; i <= MAX_GIBBS_COEFFS_R2MET_R; i++) { dblGammaSum += GIBBS_COEFFS_R2MET_R[i].ni * GIBBS_COEFFS_R2MET_R[i].Ii * pow(if97_pi, (GIBBS_COEFFS_R2MET_R[i].Ii - 1.0)) * 
GIBBS_COEFFS_R2MET_R[i].Ji * pow((if97_tau - 0.5), ( GIBBS_COEFFS_R2MET_R[i].Ji - 1.0)); } return dblGammaSum; } //********************************************************** //********* REGION 2 PROPERTY EQUATIONS********************* // specific Gibbs free energy in region 2 (kJ / kg) double if97_r2met_g (double p_MPa , double t_Kelvin) { double if97pi = p_MPa / PSTAR_R2; double if97tau = TSTAR_R2 / t_Kelvin; return IF97_R * t_Kelvin * if97_r2met_Gamma(if97pi, if97tau); } // specific volume in region 2 (metres cubed per kilogram) // inputs need to convert to pure SI, hence the ´magic´ numbers // Checked OK double if97_r2met_v (double p_MPa , double t_Kelvin ){ double if97pi = p_MPa / PSTAR_R2; double if97tau = TSTAR_R2 / t_Kelvin; return (IF97_R *1000 * t_Kelvin / (p_MPa * 1e6) ) * if97pi * ( if97_r2met_GammaPi_o(if97pi) + if97_r2met_GammaPi_r(if97pi, if97tau)); } // specific internal energy in region 2 (KJ / Kg) // Checked OK double if97_r2met_u (double p_MPa , double t_Kelvin ){ double if97pi = p_MPa / PSTAR_R2; double if97tau = TSTAR_R2/t_Kelvin; return (IF97_R* t_Kelvin ) * ((if97tau * (if97_r2met_GammaTau_o(if97tau) + if97_r2met_GammaTau_r(if97pi, if97tau))) - (if97pi * (if97_r2met_GammaPi_o(if97pi) + if97_r2met_GammaPi_r(if97pi, if97tau))) ); } // specific entropy in region 2 (KJ / Kg.K) // Checked OK double if97_r2met_s (double p_MPa , double t_Kelvin ){ double if97pi = p_MPa / PSTAR_R2; double if97tau = TSTAR_R2/t_Kelvin; return (IF97_R ) * (if97tau * (if97_r2met_GammaTau_o(if97tau) + if97_r2met_GammaTau_r(if97pi,if97tau)) - if97_r2met_Gamma(if97pi, if97tau)) ; } // specific enthalpy in region 2 (KJ / Kg) // Checked OK double if97_r2met_h (double p_MPa , double t_Kelvin ){ double if97pi = p_MPa / PSTAR_R2; double if97tau = TSTAR_R2/t_Kelvin; return IF97_R * t_Kelvin * if97tau * (if97_r2met_GammaTau_o(if97tau) + if97_r2met_GammaTau_r(if97pi, if97tau)) ; } // specific isobaric heat capacity in region 2 (KJ / Kg.K) // Checked OK double if97_r2met_Cp (double 
 p_MPa , double t_Kelvin ){   /* continuation of if97_r2met_Cp's parameter list (signature starts in previous chunk) */

	double if97pi = p_MPa / PSTAR_R2;
	double if97tau = TSTAR_R2/t_Kelvin;

	// Cp = -R * tau^2 * (d2Gamma/dtau2), ideal + residual parts
	return (-IF97_R * sqr(if97tau) * (if97_r2met_GammaTauTau_o(if97tau) + if97_r2met_GammaTauTau_r(if97pi, if97tau)))  ;
}


// specific isochoric heat capacity in region 2 (KJ / Kg.K)
// Cv = Cp-like term minus a correction built from the first, mixed and
// second pi-derivatives of the residual part.
// error in Moscow Power Institute page??
double if97_r2met_Cv (double p_MPa , double t_Kelvin ){

	double if97pi = p_MPa / PSTAR_R2;
	double if97tau = TSTAR_R2 / t_Kelvin;

	return IF97_R * ((- sqr(if97tau) * (if97_r2met_GammaTauTau_o(if97tau) + if97_r2met_GammaTauTau_r(if97pi, if97tau)))
					 - ( sqr ( 1.0 + if97pi * if97_r2met_GammaPi_r(if97pi, if97tau) - if97tau * if97pi * if97_r2met_GammaPiTau_r(if97pi, if97tau))
						 / (1.0 - sqr(if97pi) * if97_r2met_GammaPiPi_r (if97pi, if97tau)))
					)  ;
}


// speed of sound in region 2 (m/s)
// inputs need to convert to pure SI, hence the 'magic' number 1000
// (presumably kJ -> J for IF97_R — TODO confirm)
// checked OK
double if97_r2met_w (double p_MPa , double t_Kelvin ){

	double if97pi = p_MPa / PSTAR_R2;
	double if97tau = TSTAR_R2/t_Kelvin;

	return sqrt( IF97_R * 1000 * t_Kelvin *
				 ((1.0 + 2.0 * if97pi * if97_r2met_GammaPi_r(if97pi, if97tau) + sqr(if97pi) * sqr(if97_r2met_GammaPi_r(if97pi, if97tau)))
				  / ((1.0 - sqr(if97pi) * if97_r2met_GammaPiPi_r(if97pi, if97tau))
					 + ( sqr ( 1.0 + if97pi * if97_r2met_GammaPi_r(if97pi, if97tau) - if97tau * if97pi * if97_r2met_GammaPiTau_r(if97pi, if97tau))
						 / ( sqr(if97tau) * (if97_r2met_GammaTauTau_o(if97tau) + if97_r2met_GammaTauTau_r(if97pi, if97tau)))
					   )
					)
				 )
			   );
}
kmp_abt_atomic.c
/*
 * kmp_atomic.c -- ATOMIC implementation routines
 */

//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp_abt_atomic.h"
#include "kmp_abt.h"  // TRUE, asm routines prototypes

typedef unsigned char uchar;
typedef unsigned short ushort;

/*!
@defgroup ATOMIC_OPS Atomic Operations
These functions are used for implementing the many different varieties of atomic operations.

The compiler is at liberty to inline atomic operations that are naturally supported
by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined
@code
static int s = 0;
#pragma omp atomic
    s++;
@endcode
using the single instruction: `lock; incl s`

However the runtime does provide entrypoints for these operations to support compilers that choose
not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the
increment above.)

The names of the functions are encoded by using the data type name and the operation name, as in these tables.

Data Type  | Data type encoding
-----------|---------------
int8_t     | `fixed1`
uint8_t    | `fixed1u`
int16_t    | `fixed2`
uint16_t   | `fixed2u`
int32_t    | `fixed4`
uint32_t   | `fixed4u`
int64_t    | `fixed8`
uint64_t   | `fixed8u`
float      | `float4`
double     | `float8`
float 10 (8087 eighty bit float)  | `float10`
complex<float>   |  `cmplx4`
complex<double>  |  `cmplx8`
complex<float10> |  `cmplx10`
<br>

Operation | Operation encoding
----------|-------------------
+ | add
- | sub
\* | mul
/ | div
& | andb
<< | shl
\>\> | shr
\| | orb
^  | xor
&& | andl
\|\| | orl
maximum | max
minimum | min
.eqv.   | eqv
.neqv.  | neqv
<br>

For non-commutative operations, `_rev` can also be added for the reversed operation.
For the functions that capture the result, the suffix `_cpt` is added. Update Functions ================ The general form of an atomic function that just performs an update (without a `capture`) @code void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode @param ident_t a pointer to source location @param gtid the global thread id @param lhs a pointer to the left operand @param rhs the right operand `capture` functions =================== The capture functions perform an atomic update and return a result, which is either the value before the capture, or that after. They take an additional argument to determine which result is returned. Their general form is therefore @code TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ); @endcode @param ident_t a pointer to source location @param gtid the global thread id @param lhs a pointer to the left operand @param rhs the right operand @param flag one if the result is to be captured *after* the operation, zero if captured *before*. The one set of exceptions to this is the `complex<float>` type where the value is not returned, rather an extra argument pointer is passed. They look like @code void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag ); @endcode Read and Write Operations ========================= The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the value is read or written atomically, with no modification performed. In many cases on IA-32 architecture these operations can be inlined since the architecture guarantees that no tearing occurs on aligned objects accessed with a single memory operation of up to 64 bits in size. 
The general form of the read operations is @code TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc ); @endcode For the write operations the form is @code void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode Full list of functions ====================== This leads to the generation of 376 atomic functions, as follows. Functons for integers --------------------- There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters). @code __kmpc_atomic_fixed1_add __kmpc_atomic_fixed1_add_cpt __kmpc_atomic_fixed1_add_fp __kmpc_atomic_fixed1_andb __kmpc_atomic_fixed1_andb_cpt __kmpc_atomic_fixed1_andl __kmpc_atomic_fixed1_andl_cpt __kmpc_atomic_fixed1_div __kmpc_atomic_fixed1_div_cpt __kmpc_atomic_fixed1_div_cpt_rev __kmpc_atomic_fixed1_div_float8 __kmpc_atomic_fixed1_div_fp __kmpc_atomic_fixed1_div_rev __kmpc_atomic_fixed1_eqv __kmpc_atomic_fixed1_eqv_cpt __kmpc_atomic_fixed1_max __kmpc_atomic_fixed1_max_cpt __kmpc_atomic_fixed1_min __kmpc_atomic_fixed1_min_cpt __kmpc_atomic_fixed1_mul __kmpc_atomic_fixed1_mul_cpt __kmpc_atomic_fixed1_mul_float8 __kmpc_atomic_fixed1_mul_fp __kmpc_atomic_fixed1_neqv __kmpc_atomic_fixed1_neqv_cpt __kmpc_atomic_fixed1_orb __kmpc_atomic_fixed1_orb_cpt __kmpc_atomic_fixed1_orl __kmpc_atomic_fixed1_orl_cpt __kmpc_atomic_fixed1_rd __kmpc_atomic_fixed1_shl __kmpc_atomic_fixed1_shl_cpt __kmpc_atomic_fixed1_shl_cpt_rev __kmpc_atomic_fixed1_shl_rev __kmpc_atomic_fixed1_shr __kmpc_atomic_fixed1_shr_cpt __kmpc_atomic_fixed1_shr_cpt_rev __kmpc_atomic_fixed1_shr_rev __kmpc_atomic_fixed1_sub __kmpc_atomic_fixed1_sub_cpt __kmpc_atomic_fixed1_sub_cpt_rev __kmpc_atomic_fixed1_sub_fp __kmpc_atomic_fixed1_sub_rev __kmpc_atomic_fixed1_swp __kmpc_atomic_fixed1_wr __kmpc_atomic_fixed1_xor __kmpc_atomic_fixed1_xor_cpt __kmpc_atomic_fixed1u_div __kmpc_atomic_fixed1u_div_cpt __kmpc_atomic_fixed1u_div_cpt_rev __kmpc_atomic_fixed1u_div_fp __kmpc_atomic_fixed1u_div_rev 
__kmpc_atomic_fixed1u_shr __kmpc_atomic_fixed1u_shr_cpt __kmpc_atomic_fixed1u_shr_cpt_rev __kmpc_atomic_fixed1u_shr_rev __kmpc_atomic_fixed2_add __kmpc_atomic_fixed2_add_cpt __kmpc_atomic_fixed2_add_fp __kmpc_atomic_fixed2_andb __kmpc_atomic_fixed2_andb_cpt __kmpc_atomic_fixed2_andl __kmpc_atomic_fixed2_andl_cpt __kmpc_atomic_fixed2_div __kmpc_atomic_fixed2_div_cpt __kmpc_atomic_fixed2_div_cpt_rev __kmpc_atomic_fixed2_div_float8 __kmpc_atomic_fixed2_div_fp __kmpc_atomic_fixed2_div_rev __kmpc_atomic_fixed2_eqv __kmpc_atomic_fixed2_eqv_cpt __kmpc_atomic_fixed2_max __kmpc_atomic_fixed2_max_cpt __kmpc_atomic_fixed2_min __kmpc_atomic_fixed2_min_cpt __kmpc_atomic_fixed2_mul __kmpc_atomic_fixed2_mul_cpt __kmpc_atomic_fixed2_mul_float8 __kmpc_atomic_fixed2_mul_fp __kmpc_atomic_fixed2_neqv __kmpc_atomic_fixed2_neqv_cpt __kmpc_atomic_fixed2_orb __kmpc_atomic_fixed2_orb_cpt __kmpc_atomic_fixed2_orl __kmpc_atomic_fixed2_orl_cpt __kmpc_atomic_fixed2_rd __kmpc_atomic_fixed2_shl __kmpc_atomic_fixed2_shl_cpt __kmpc_atomic_fixed2_shl_cpt_rev __kmpc_atomic_fixed2_shl_rev __kmpc_atomic_fixed2_shr __kmpc_atomic_fixed2_shr_cpt __kmpc_atomic_fixed2_shr_cpt_rev __kmpc_atomic_fixed2_shr_rev __kmpc_atomic_fixed2_sub __kmpc_atomic_fixed2_sub_cpt __kmpc_atomic_fixed2_sub_cpt_rev __kmpc_atomic_fixed2_sub_fp __kmpc_atomic_fixed2_sub_rev __kmpc_atomic_fixed2_swp __kmpc_atomic_fixed2_wr __kmpc_atomic_fixed2_xor __kmpc_atomic_fixed2_xor_cpt __kmpc_atomic_fixed2u_div __kmpc_atomic_fixed2u_div_cpt __kmpc_atomic_fixed2u_div_cpt_rev __kmpc_atomic_fixed2u_div_fp __kmpc_atomic_fixed2u_div_rev __kmpc_atomic_fixed2u_shr __kmpc_atomic_fixed2u_shr_cpt __kmpc_atomic_fixed2u_shr_cpt_rev __kmpc_atomic_fixed2u_shr_rev __kmpc_atomic_fixed4_add __kmpc_atomic_fixed4_add_cpt __kmpc_atomic_fixed4_add_fp __kmpc_atomic_fixed4_andb __kmpc_atomic_fixed4_andb_cpt __kmpc_atomic_fixed4_andl __kmpc_atomic_fixed4_andl_cpt __kmpc_atomic_fixed4_div __kmpc_atomic_fixed4_div_cpt __kmpc_atomic_fixed4_div_cpt_rev 
__kmpc_atomic_fixed4_div_float8 __kmpc_atomic_fixed4_div_fp __kmpc_atomic_fixed4_div_rev __kmpc_atomic_fixed4_eqv __kmpc_atomic_fixed4_eqv_cpt __kmpc_atomic_fixed4_max __kmpc_atomic_fixed4_max_cpt __kmpc_atomic_fixed4_min __kmpc_atomic_fixed4_min_cpt __kmpc_atomic_fixed4_mul __kmpc_atomic_fixed4_mul_cpt __kmpc_atomic_fixed4_mul_float8 __kmpc_atomic_fixed4_mul_fp __kmpc_atomic_fixed4_neqv __kmpc_atomic_fixed4_neqv_cpt __kmpc_atomic_fixed4_orb __kmpc_atomic_fixed4_orb_cpt __kmpc_atomic_fixed4_orl __kmpc_atomic_fixed4_orl_cpt __kmpc_atomic_fixed4_rd __kmpc_atomic_fixed4_shl __kmpc_atomic_fixed4_shl_cpt __kmpc_atomic_fixed4_shl_cpt_rev __kmpc_atomic_fixed4_shl_rev __kmpc_atomic_fixed4_shr __kmpc_atomic_fixed4_shr_cpt __kmpc_atomic_fixed4_shr_cpt_rev __kmpc_atomic_fixed4_shr_rev __kmpc_atomic_fixed4_sub __kmpc_atomic_fixed4_sub_cpt __kmpc_atomic_fixed4_sub_cpt_rev __kmpc_atomic_fixed4_sub_fp __kmpc_atomic_fixed4_sub_rev __kmpc_atomic_fixed4_swp __kmpc_atomic_fixed4_wr __kmpc_atomic_fixed4_xor __kmpc_atomic_fixed4_xor_cpt __kmpc_atomic_fixed4u_div __kmpc_atomic_fixed4u_div_cpt __kmpc_atomic_fixed4u_div_cpt_rev __kmpc_atomic_fixed4u_div_fp __kmpc_atomic_fixed4u_div_rev __kmpc_atomic_fixed4u_shr __kmpc_atomic_fixed4u_shr_cpt __kmpc_atomic_fixed4u_shr_cpt_rev __kmpc_atomic_fixed4u_shr_rev __kmpc_atomic_fixed8_add __kmpc_atomic_fixed8_add_cpt __kmpc_atomic_fixed8_add_fp __kmpc_atomic_fixed8_andb __kmpc_atomic_fixed8_andb_cpt __kmpc_atomic_fixed8_andl __kmpc_atomic_fixed8_andl_cpt __kmpc_atomic_fixed8_div __kmpc_atomic_fixed8_div_cpt __kmpc_atomic_fixed8_div_cpt_rev __kmpc_atomic_fixed8_div_float8 __kmpc_atomic_fixed8_div_fp __kmpc_atomic_fixed8_div_rev __kmpc_atomic_fixed8_eqv __kmpc_atomic_fixed8_eqv_cpt __kmpc_atomic_fixed8_max __kmpc_atomic_fixed8_max_cpt __kmpc_atomic_fixed8_min __kmpc_atomic_fixed8_min_cpt __kmpc_atomic_fixed8_mul __kmpc_atomic_fixed8_mul_cpt __kmpc_atomic_fixed8_mul_float8 __kmpc_atomic_fixed8_mul_fp __kmpc_atomic_fixed8_neqv 
__kmpc_atomic_fixed8_neqv_cpt __kmpc_atomic_fixed8_orb __kmpc_atomic_fixed8_orb_cpt __kmpc_atomic_fixed8_orl __kmpc_atomic_fixed8_orl_cpt __kmpc_atomic_fixed8_rd __kmpc_atomic_fixed8_shl __kmpc_atomic_fixed8_shl_cpt __kmpc_atomic_fixed8_shl_cpt_rev __kmpc_atomic_fixed8_shl_rev __kmpc_atomic_fixed8_shr __kmpc_atomic_fixed8_shr_cpt __kmpc_atomic_fixed8_shr_cpt_rev __kmpc_atomic_fixed8_shr_rev __kmpc_atomic_fixed8_sub __kmpc_atomic_fixed8_sub_cpt __kmpc_atomic_fixed8_sub_cpt_rev __kmpc_atomic_fixed8_sub_fp __kmpc_atomic_fixed8_sub_rev __kmpc_atomic_fixed8_swp __kmpc_atomic_fixed8_wr __kmpc_atomic_fixed8_xor __kmpc_atomic_fixed8_xor_cpt __kmpc_atomic_fixed8u_div __kmpc_atomic_fixed8u_div_cpt __kmpc_atomic_fixed8u_div_cpt_rev __kmpc_atomic_fixed8u_div_fp __kmpc_atomic_fixed8u_div_rev __kmpc_atomic_fixed8u_shr __kmpc_atomic_fixed8u_shr_cpt __kmpc_atomic_fixed8u_shr_cpt_rev __kmpc_atomic_fixed8u_shr_rev @endcode Functions for floating point ---------------------------- There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes. (Ten byte floats are used by X87, but are now rare). 
@code __kmpc_atomic_float4_add __kmpc_atomic_float4_add_cpt __kmpc_atomic_float4_add_float8 __kmpc_atomic_float4_add_fp __kmpc_atomic_float4_div __kmpc_atomic_float4_div_cpt __kmpc_atomic_float4_div_cpt_rev __kmpc_atomic_float4_div_float8 __kmpc_atomic_float4_div_fp __kmpc_atomic_float4_div_rev __kmpc_atomic_float4_max __kmpc_atomic_float4_max_cpt __kmpc_atomic_float4_min __kmpc_atomic_float4_min_cpt __kmpc_atomic_float4_mul __kmpc_atomic_float4_mul_cpt __kmpc_atomic_float4_mul_float8 __kmpc_atomic_float4_mul_fp __kmpc_atomic_float4_rd __kmpc_atomic_float4_sub __kmpc_atomic_float4_sub_cpt __kmpc_atomic_float4_sub_cpt_rev __kmpc_atomic_float4_sub_float8 __kmpc_atomic_float4_sub_fp __kmpc_atomic_float4_sub_rev __kmpc_atomic_float4_swp __kmpc_atomic_float4_wr __kmpc_atomic_float8_add __kmpc_atomic_float8_add_cpt __kmpc_atomic_float8_add_fp __kmpc_atomic_float8_div __kmpc_atomic_float8_div_cpt __kmpc_atomic_float8_div_cpt_rev __kmpc_atomic_float8_div_fp __kmpc_atomic_float8_div_rev __kmpc_atomic_float8_max __kmpc_atomic_float8_max_cpt __kmpc_atomic_float8_min __kmpc_atomic_float8_min_cpt __kmpc_atomic_float8_mul __kmpc_atomic_float8_mul_cpt __kmpc_atomic_float8_mul_fp __kmpc_atomic_float8_rd __kmpc_atomic_float8_sub __kmpc_atomic_float8_sub_cpt __kmpc_atomic_float8_sub_cpt_rev __kmpc_atomic_float8_sub_fp __kmpc_atomic_float8_sub_rev __kmpc_atomic_float8_swp __kmpc_atomic_float8_wr __kmpc_atomic_float10_add __kmpc_atomic_float10_add_cpt __kmpc_atomic_float10_add_fp __kmpc_atomic_float10_div __kmpc_atomic_float10_div_cpt __kmpc_atomic_float10_div_cpt_rev __kmpc_atomic_float10_div_fp __kmpc_atomic_float10_div_rev __kmpc_atomic_float10_mul __kmpc_atomic_float10_mul_cpt __kmpc_atomic_float10_mul_fp __kmpc_atomic_float10_rd __kmpc_atomic_float10_sub __kmpc_atomic_float10_sub_cpt __kmpc_atomic_float10_sub_cpt_rev __kmpc_atomic_float10_sub_fp __kmpc_atomic_float10_sub_rev __kmpc_atomic_float10_swp __kmpc_atomic_float10_wr __kmpc_atomic_float16_add __kmpc_atomic_float16_add_cpt 
__kmpc_atomic_float16_div __kmpc_atomic_float16_div_cpt __kmpc_atomic_float16_div_cpt_rev __kmpc_atomic_float16_div_rev __kmpc_atomic_float16_max __kmpc_atomic_float16_max_cpt __kmpc_atomic_float16_min __kmpc_atomic_float16_min_cpt __kmpc_atomic_float16_mul __kmpc_atomic_float16_mul_cpt __kmpc_atomic_float16_rd __kmpc_atomic_float16_sub __kmpc_atomic_float16_sub_cpt __kmpc_atomic_float16_sub_cpt_rev __kmpc_atomic_float16_sub_rev __kmpc_atomic_float16_swp __kmpc_atomic_float16_wr @endcode Functions for Complex types --------------------------- Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes. The names here are based on the size of the component float, *not* the size of the complex type. So `__kmpc_atomc_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* `complex<float>`. @code __kmpc_atomic_cmplx4_add __kmpc_atomic_cmplx4_add_cmplx8 __kmpc_atomic_cmplx4_add_cpt __kmpc_atomic_cmplx4_div __kmpc_atomic_cmplx4_div_cmplx8 __kmpc_atomic_cmplx4_div_cpt __kmpc_atomic_cmplx4_div_cpt_rev __kmpc_atomic_cmplx4_div_rev __kmpc_atomic_cmplx4_mul __kmpc_atomic_cmplx4_mul_cmplx8 __kmpc_atomic_cmplx4_mul_cpt __kmpc_atomic_cmplx4_rd __kmpc_atomic_cmplx4_sub __kmpc_atomic_cmplx4_sub_cmplx8 __kmpc_atomic_cmplx4_sub_cpt __kmpc_atomic_cmplx4_sub_cpt_rev __kmpc_atomic_cmplx4_sub_rev __kmpc_atomic_cmplx4_swp __kmpc_atomic_cmplx4_wr __kmpc_atomic_cmplx8_add __kmpc_atomic_cmplx8_add_cpt __kmpc_atomic_cmplx8_div __kmpc_atomic_cmplx8_div_cpt __kmpc_atomic_cmplx8_div_cpt_rev __kmpc_atomic_cmplx8_div_rev __kmpc_atomic_cmplx8_mul __kmpc_atomic_cmplx8_mul_cpt __kmpc_atomic_cmplx8_rd __kmpc_atomic_cmplx8_sub __kmpc_atomic_cmplx8_sub_cpt __kmpc_atomic_cmplx8_sub_cpt_rev __kmpc_atomic_cmplx8_sub_rev __kmpc_atomic_cmplx8_swp __kmpc_atomic_cmplx8_wr __kmpc_atomic_cmplx10_add __kmpc_atomic_cmplx10_add_cpt __kmpc_atomic_cmplx10_div __kmpc_atomic_cmplx10_div_cpt __kmpc_atomic_cmplx10_div_cpt_rev __kmpc_atomic_cmplx10_div_rev 
__kmpc_atomic_cmplx10_mul
__kmpc_atomic_cmplx10_mul_cpt
__kmpc_atomic_cmplx10_rd
__kmpc_atomic_cmplx10_sub
__kmpc_atomic_cmplx10_sub_cpt
__kmpc_atomic_cmplx10_sub_cpt_rev
__kmpc_atomic_cmplx10_sub_rev
__kmpc_atomic_cmplx10_swp
__kmpc_atomic_cmplx10_wr
__kmpc_atomic_cmplx16_add
__kmpc_atomic_cmplx16_add_cpt
__kmpc_atomic_cmplx16_div
__kmpc_atomic_cmplx16_div_cpt
__kmpc_atomic_cmplx16_div_cpt_rev
__kmpc_atomic_cmplx16_div_rev
__kmpc_atomic_cmplx16_mul
__kmpc_atomic_cmplx16_mul_cpt
__kmpc_atomic_cmplx16_rd
__kmpc_atomic_cmplx16_sub
__kmpc_atomic_cmplx16_sub_cpt
__kmpc_atomic_cmplx16_sub_cpt_rev
__kmpc_atomic_cmplx16_swp
__kmpc_atomic_cmplx16_wr
@endcode
*/

/*!
@ingroup ATOMIC_OPS
@{
*/

/*
 * Global vars
 */

// Dispatch mode for user-coded atomics: 1 selects the per-size-lock /
// inline path, 2 routes everything through the single generic lock for
// GOMP (gcc) compatibility.  Chosen at build time via KMP_GOMP_COMPAT.
#ifndef KMP_GOMP_COMPAT
int __kmp_atomic_mode = 1;      // Intel perf
#else
int __kmp_atomic_mode = 2;      // GOMP compatibility
#endif /* KMP_GOMP_COMPAT */

// One lock per operand size/category; KMP_ALIGN(128) pads the block so the
// locks do not share cache lines with unrelated globals (false sharing).
KMP_ALIGN(128)

kmp_atomic_lock_t __kmp_atomic_lock;     /* Control access to all user coded atomics in Gnu compat mode   */
kmp_atomic_lock_t __kmp_atomic_lock_1i;  /* Control access to all user coded atomics for 1-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_2i;  /* Control access to all user coded atomics for 2-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4i;  /* Control access to all user coded atomics for 4-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4r;  /* Control access to all user coded atomics for kmp_real32 data type    */
kmp_atomic_lock_t __kmp_atomic_lock_8i;  /* Control access to all user coded atomics for 8-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_8r;  /* Control access to all user coded atomics for kmp_real64 data type    */
kmp_atomic_lock_t __kmp_atomic_lock_8c;  /* Control access to all user coded atomics for complex byte data type  */
kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type   */
kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type         */
kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/
kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/
kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */

/*
  2007-03-02:
  Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a
  bug on *_32 and *_32e. This is just a temporary workaround for the problem.
  It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG
  routines in assembler language.
*/
#define KMP_ATOMIC_VOLATILE volatile

#if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD

// Arithmetic / comparison operators for the alignment-wrapper structs
// (Quad_a4_t, Quad_a16_t, kmp_cmplx128_a4_t, kmp_cmplx128_a16_t), which
// forward each operation to the wrapped `.q` member so the generic macros
// below can treat the wrappers like plain arithmetic types.
static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; }

static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; }

static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; };
static
inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; };

static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; };

#endif

/* ------------------------------------------------------------------------ */
/* ATOMIC implementation routines                                           */
/* one routine for each operation and operand type                          */
/* ------------------------------------------------------------------------ */

// All routines declarations looks like
// void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs );
// ------------------------------------------------------------------------

// Resolve the global thread id lazily when the caller passed
// KMP_GTID_UNKNOWN; a valid gtid is needed before acquiring any of the
// atomic locks below.
#define KMP_CHECK_GTID                                                    \
    if ( gtid == KMP_GTID_UNKNOWN ) {                                     \
        gtid = __kmp_entry_gtid();                                        \
    } // check and get gtid when needed

// Beginning of a definition (provides name, parameters, debug trace)
//     TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
//     OP_ID   - operation identifier (add, sub, mul, ...)
// TYPE - operands' type #define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ // Lock variables used for critical sections for various size operands #define ATOMIC_LOCK0 __kmp_atomic_lock // all types, for Gnu compat #define ATOMIC_LOCK1i __kmp_atomic_lock_1i // char #define ATOMIC_LOCK2i __kmp_atomic_lock_2i // short #define ATOMIC_LOCK4i __kmp_atomic_lock_4i // long int #define ATOMIC_LOCK4r __kmp_atomic_lock_4r // float #define ATOMIC_LOCK8i __kmp_atomic_lock_8i // long long int #define ATOMIC_LOCK8r __kmp_atomic_lock_8r // double #define ATOMIC_LOCK8c __kmp_atomic_lock_8c // float complex #define ATOMIC_LOCK10r __kmp_atomic_lock_10r // long double #define ATOMIC_LOCK16r __kmp_atomic_lock_16r // _Quad #define ATOMIC_LOCK16c __kmp_atomic_lock_16c // double complex #define ATOMIC_LOCK20c __kmp_atomic_lock_20c // long double complex #define ATOMIC_LOCK32c __kmp_atomic_lock_32c // _Quad complex // ------------------------------------------------------------------------ // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*lhs) OP (rhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ // For GNU compatibility, we may need to use a critical section, // even though it is not required by the ISA. 
// // On IA-32 architecture, all atomic operations except for fixed 4 byte add, // sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common // critical section. On Intel(R) 64, all atomic operations are done with fetch // and add or compare and exchange. Therefore, the FLAG parameter to this // macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extension which // require a critical section, where we predict that they will be implemented // in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()). // // When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct, // the FLAG parameter should always be 1. If we know that we will be using // a critical section, then we want to make certain that we use the generic // lock __kmp_atomic_lock to protect the atomic update, and not of of the // locks that are specialized based upon the size or type of the data. // // If FLAG is 0, then we are relying on dead code elimination by the build // compiler to get rid of the useless block of code, and save a needless // branch at runtime. // #ifdef KMP_GOMP_COMPAT # define OP_GOMP_CRITICAL(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL( OP, 0 ); \ return; \ } # else # define OP_GOMP_CRITICAL(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ #if KMP_MIC # define KMP_DO_PAUSE _mm_delay_32( 1 ) #else # define KMP_DO_PAUSE KMP_CPU_PAUSE() #endif /* KMP_MIC */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator #define OP_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE old_value, new_value; \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs,                   \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value,         \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) )      \
        {                                                                 \
            KMP_DO_PAUSE;                                                 \
                                                                          \
            old_value = *(TYPE volatile *)lhs;                            \
            new_value = old_value OP rhs;                                 \
        }                                                                 \
    }

#if USE_CMPXCHG_FIX
// 2007-06-25:
//   workaround for C78287 (complex(kind=4) data type)
//   lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm)
//   Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro.
//   This is a problem of the compiler.
//   Related tracker is C76005, targeted to 11.0.
//   I verified the asm of the workaround.
// Same CAS retry loop as OP_CMPXCHG, but the temporaries are accessed
// through an aliasing pointer member so the compiler cannot cache them.
#define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP)                               \
    {                                                                     \
        struct _sss {                                                     \
            TYPE            cmp;                                          \
            kmp_int##BITS   *vvv;                                         \
        };                                                                \
        struct _sss old_value, new_value;                                 \
        old_value.vvv = ( kmp_int##BITS * )&old_value.cmp;                \
        new_value.vvv = ( kmp_int##BITS * )&new_value.cmp;                \
        *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs;              \
        new_value.cmp = old_value.cmp OP rhs;                             \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv,      \
                      *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) )   \
        {                                                                 \
            KMP_DO_PAUSE;                                                 \
                                                                          \
            *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs;          \
            new_value.cmp = old_value.cmp OP rhs;                         \
        }                                                                 \
    }
// end of the first part of the workaround for C78287
#endif // USE_CMPXCHG_FIX

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
// Generates a full __kmpc_atomic_* entry point whose update is a single
// hardware fetch-and-add (MASK/LCK_ID unused on this branch).
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void)                                     \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG)                                     \
    /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */     \
    KMP_TEST_THEN_ADD##BITS( lhs, OP rhs );                               \
}
// -------------------------------------------------------------------------
// Generates a full __kmpc_atomic_* entry point whose update is the
// OP_CMPXCHG retry loop (MASK/LCK_ID unused on this branch).
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG)  \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void)                                     \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG)                                     \
    OP_CMPXCHG(TYPE,BITS,OP)                                              \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void)                                     \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG)                                     \
    OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP)                                   \
}
// end of the second part of the workaround for C78287
#endif

#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// The 0x##MASK alignment test picks the fast path for aligned addresses;
// unaligned ones fall back to the per-size critical section.
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void)                                     \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG)                                     \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) {                          \
        /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
        KMP_TEST_THEN_ADD##BITS( lhs, OP rhs );                           \
    } else {                                                              \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL(OP##=,LCK_ID)  /* unaligned address - use critical */ \
    }                                                                     \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG)  \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void)                                     \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG)                                     \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) {                          \
        OP_CMPXCHG(TYPE,BITS,OP)    /* aligned address */                 \
    } else {                                                              \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL(OP##=,LCK_ID)  /* unaligned address - use critical */ \
    }                                                                     \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void)                                     \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG)                                     \
    if ( ! \
 ( (kmp_uintptr_t) lhs & 0x##MASK) ) {                                    \
        OP_CMPXCHG(TYPE,BITS,OP)    /* aligned address */                 \
    } else {                                                              \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL(OP##=,LCK_ID)  /* unaligned address - use critical */ \
    }                                                                     \
}
// end of the second part of the workaround for C78287
#endif // USE_CMPXCHG_FIX

#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// Instantiations below: each macro invocation expands into one complete
// __kmpc_atomic_* entry point (named in the trailing comment).

// Routines for ATOMIC 4-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed4, add, kmp_int32,  32, +, 4i, 3, 0            )  // __kmpc_atomic_fixed4_add
ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32,  32, -, 4i, 3, 0            )  // __kmpc_atomic_fixed4_sub

ATOMIC_CMPXCHG( float4,  add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 )   // __kmpc_atomic_float4_add
ATOMIC_CMPXCHG( float4,  sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 )   // __kmpc_atomic_float4_sub

// Routines for ATOMIC 8-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed8, add, kmp_int64,  64, +, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_add
ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64,  64, -, 8i, 7, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_sub

ATOMIC_CMPXCHG( float8,  add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 )   // __kmpc_atomic_float8_add
ATOMIC_CMPXCHG( float8,  sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 )   // __kmpc_atomic_float8_sub

// ------------------------------------------------------------------------
// Entries definition for integer operands
//     TYPE_ID - operands type and size (fixed4, float4)
//     OP_ID   - operation identifier (add, sub, mul, ...)
// TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // MASK - used for alignment check // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // __kmpc_atomic_fixed1_xor ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 
1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, 
<<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG /* ------------------------------------------------------------------------ */ /* Routines for C/C++ Reduction operators && and || */ /* ------------------------------------------------------------------------ */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment // TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used #define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CRITICAL( = *lhs OP, LCK_ID ) \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems =================================== #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. 
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl /* ------------------------------------------------------------------------- */ /* Routines for Fortran operators that matched no one in C: */ /* MAX, MIN, .EQV., .NEQV. */ /* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl} */ /* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */ /* ------------------------------------------------------------------------- */ // ------------------------------------------------------------------------- // MIN and MAX need separate macros // OP - operator to check if we need any actions? #define MIN_MAX_CRITSECT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if ( *lhs OP rhs ) { /* still need actions? 
*/ \ *lhs = rhs; \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------- #ifdef KMP_GOMP_COMPAT #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \ if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT( OP, 0 ); \ return; \ } #else #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------- #define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value; \ temp_val = *lhs; \ old_value = temp_val; \ while ( old_value OP rhs && /* still need actions? */ \ ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ { \ KMP_CPU_PAUSE(); \ temp_val = *lhs; \ old_value = temp_val; \ } \ } // ------------------------------------------------------------------------- // 1-byte, 2-byte operands - use critical section #define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { /* need actions? */ \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CRITSECT(OP,LCK_ID) \ } \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ } \ } #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. #define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \ } \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min #if KMP_HAVE_QUAD MIN_MAX_CRITICAL( float16, max, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max MIN_MAX_CRITICAL( float16, min, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min #if ( KMP_ARCH_X86 ) MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16 MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16 #endif #endif // ------------------------------------------------------------------------ // Need separate macros for .EQV. 
because of the need of complement (~) // OP ignored for critical sections, ^=~ used instead #define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \ } // ------------------------------------------------------------------------ #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems =================================== #define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div #if 
KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16 ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16 ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16 ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16 #endif #endif // routines for complex types #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div // end of the workaround for C78287 #else ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul ATOMIC_CRITICAL( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div #endif // USE_CMPXCHG_FIX ATOMIC_CRITICAL( cmplx8, add, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add ATOMIC_CRITICAL( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub ATOMIC_CRITICAL( cmplx8, mul, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul ATOMIC_CRITICAL( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // 
__kmpc_atomic_cmplx8_div ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div #if KMP_HAVE_QUAD ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16 ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16 ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16 ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16 #endif #endif #if OMP_40_ENABLED // OpenMP 4.0: x = expr binop x for non-commutative operations. 
// Supported only on IA-32 architecture and Intel(R) 64 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_REV(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*lhs) = (rhs) OP (*lhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_REV(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_REV( OP, 0 ); \ return; \ } #else #define OP_GOMP_CRITICAL_REV(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operands' type #define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid )); // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_REV(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_DO_PAUSE; \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_REV(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG_REV( fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev ATOMIC_CMPXCHG_REV( fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev ATOMIC_CMPXCHG_REV( fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev ATOMIC_CMPXCHG_REV( fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev ATOMIC_CMPXCHG_REV( fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev ATOMIC_CMPXCHG_REV( fixed2u, div, 
kmp_uint16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev ATOMIC_CMPXCHG_REV( fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev ATOMIC_CMPXCHG_REV( fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev ATOMIC_CMPXCHG_REV( fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev ATOMIC_CMPXCHG_REV( fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev ATOMIC_CMPXCHG_REV( fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev ATOMIC_CMPXCHG_REV( fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev ATOMIC_CMPXCHG_REV( fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev ATOMIC_CMPXCHG_REV( fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev ATOMIC_CMPXCHG_REV( fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev ATOMIC_CMPXCHG_REV( fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev ATOMIC_CMPXCHG_REV( float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev ATOMIC_CMPXCHG_REV( float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev ATOMIC_CMPXCHG_REV( float8, div, kmp_real64, 64, /, 8r, 
KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev ATOMIC_CMPXCHG_REV( float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CRITICAL_REV(OP,LCK_ID) \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev #endif #endif // routines for complex types ATOMIC_CRITICAL_REV( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_rev ATOMIC_CRITICAL_REV( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_rev ATOMIC_CRITICAL_REV( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev ATOMIC_CRITICAL_REV( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // 
__kmpc_atomic_cmplx10_div_rev #if KMP_HAVE_QUAD ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev #endif #endif #endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 // End of OpenMP 4.0: x = expr binop x for non-commutative operations. #endif //OMP_40_ENABLED /* ------------------------------------------------------------------------ */ /* Routines for mixed types of LHS and RHS, when RHS is "larger" */ /* Note: in order to reduce the total number of types combinations */ /* it is supposed that compiler converts RHS to longest floating type,*/ /* that is _Quad, before call to any of these routines */ /* Conversion to _Quad will be done by the compiler during calculation, */ /* conversion back to TYPE - before the assignment, like: */ /* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */ /* Performance penalty expected because of SW emulation use */ /* ------------------------------------------------------------------------ */ #define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- #define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------- 
#if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // RHS=float8 ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8 ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 
8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8 // RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them) #if KMP_HAVE_QUAD ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp 
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // 
__kmpc_atomic_float10_sub_fp ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp #endif #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #else #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #endif // USE_CMPXCHG_FIX #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8 // READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 ////////////////////////////////////////////////////////////////////////////////////////////////////// // ------------------------------------------------------------------------ // Atomic READ routines // ------------------------------------------------------------------------ // ------------------------------------------------------------------------ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type
// ------------------------------------------------------------------------
// ATOMIC_BEGIN_READ opens the definition of one generated atomic-read entry
// point, RET_TYPE __kmpc_atomic_<TYPE_ID>_<OP_ID>( ident_t *, int gtid, TYPE * loc );
// the function body is completed by the ATOMIC_*_READ macros further below.
#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store_ret" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
// TODO: check if it is still necessary
// Return old value regardless of the result of "compare & swap" operation.
// (Expected and desired both point at old_value.i_val, so *loc is left
// unchanged; the CAS return value is an atomic snapshot of *loc.)
#define OP_CMPXCHG_READ(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        union f_i_union { \
            TYPE f_val; \
            kmp_int##BITS i_val; \
        }; \
        union f_i_union old_value; \
        temp_val = *loc; \
        old_value.f_val = temp_val; \
        old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \
        new_value = old_value.f_val; \
        return new_value; \
    }

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
//      (unused here: the locked read below is the entire operation)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_READ(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    new_value = (*loc); \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// -------------------------------------------------------------------------
// GOMP compatibility: when libgomp-style atomics are requested
// (__kmp_atomic_mode == 2) and FLAG is set, perform the read under the
// global critical section (lock 0) and return immediately, bypassing the
// lock-free path that follows in the generated function body.
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_READ( OP, 0 ); \
        return new_value; \
    }
#else
#define OP_GOMP_CRITICAL_READ(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
// Fixed-size integer read: an atomic fetch-and-add of (OP 0), i.e. adds
// zero, which returns the current value without modifying *loc.
#define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
    new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 ); \
    return new_value; \
}

// -------------------------------------------------------------------------
// Read via the CAS-based snapshot above (used for float4/float8 and the
// small integer types that have no fetch-and-add of matching width).
#define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
    OP_CMPXCHG_READ(TYPE,BITS,OP) \
}

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL_READ(OP,LCK_ID) /* send assignment */ \
    return new_value; \
}

// ------------------------------------------------------------------------
// Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work.
// Let's return the read value through the additional parameter.
#if ( KMP_OS_WINDOWS )
// Workaround variant for cmplx4 on Windows* OS (CQ220361): the value read is
// delivered through the extra 'out' parameter instead of a return value.
#define OP_CRITICAL_READ_WRK(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    (*out) = (*loc); \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
// GOMP-compatibility entry for the workaround variant: route the read to the
// critical section (lock 0) when __kmp_atomic_mode == 2 and FLAG is set.
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_READ_WRK( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Like ATOMIC_BEGIN_READ, but the generated function returns void and takes
// the destination as its first parameter.
#define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
    OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \
}
#endif // KMP_OS_WINDOWS

// ------------------------------------------------------------------------
// Generated atomic-read entry points.
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd
ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd
ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd
ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd

// !!! TODO: Remove lock operations for "char" since it can't be non-atomic
ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd
ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd

ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd
#endif // KMP_HAVE_QUAD

// Fix for CQ220361 on Windows* OS
#if ( KMP_OS_WINDOWS )
ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd
#else
ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd
#endif
ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd
ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd
ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd
#endif
#endif

// ------------------------------------------------------------------------
// Atomic WRITE routines
// ------------------------------------------------------------------------
// Write via atomic fixed-size exchange; the previous value is discarded.
#define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
    KMP_XCHG_FIXED##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
// Same, but through the floating-point exchange primitive.
#define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
    KMP_XCHG_REAL##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_WR(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs; \
        } \
    }
// -------------------------------------------------------------------------
// Write via the CAS loop above.
#define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
    OP_CMPXCHG_WR(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL(OP,LCK_ID) /* send assignment */ \
}
// -------------------------------------------------------------------------
// Generated atomic-write entry points.
ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr
ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr
ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr
#if ( KMP_ARCH_X86 )
// On 32-bit x86 the 64-bit writes go through the CAS loop instead of
// KMP_XCHG_FIXED64 / KMP_XCHG_REAL64.
ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#else
ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#endif

ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#else
ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#endif

ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr
#endif
ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr
ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr
ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr
ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr
#endif
#endif

// ------------------------------------------------------------------------
// Atomic CAPTURE routines
// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type #define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) OP rhs; \ new_value = (*lhs); \ } else { \ new_value = (*lhs); \ (*lhs) OP rhs; \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return new_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT( OP##=, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_CPT(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = old_value OP rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = old_value OP rhs; \ } \ if( flag ) { \ return new_value; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE old_value, new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ if( flag ) { \ return old_value OP rhs; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_add_cpt ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 ) // __kmpc_atomic_fixed4_sub_cpt ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt // 
------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed2_div_cpt ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt ATOMIC_CMPXCHG_CPT( fixed8, andb_cpt, kmp_int64, 64, &, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, 
KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for C/C++ Reduction operators && and || // ------------------------------------------------------------------------ // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_L_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ new_value OP rhs; \ } else \ new_value = (*lhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define 
OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_L_CPT( OP, 0 ); \ return new_value; \ } #else #define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment #define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char, 8, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt ATOMIC_CMPX_L_CPT( fixed1, orl_cpt, char, 8, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short, 16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt ATOMIC_CMPX_L_CPT( fixed2, orl_cpt, short, 16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 ) // __kmpc_atomic_fixed4_andl_cpt ATOMIC_CMPX_L_CPT( fixed4, orl_cpt, kmp_int32, 32, ||, 0 ) // __kmpc_atomic_fixed4_orl_cpt ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt ATOMIC_CMPX_L_CPT( fixed8, orl_cpt, kmp_int64, 64, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl_cpt // ------------------------------------------------------------------------- // Routines for Fortran operators that matched no one in C: // MAX, MIN, .EQV., .NEQV. // Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt // Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt // ------------------------------------------------------------------------- // ------------------------------------------------------------------------- // MIN and MAX need separate macros // OP - operator to check if we need any actions? 
#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if ( *lhs OP rhs ) { /* still need actions? */ \ old_value = *lhs; \ *lhs = rhs; \ if ( flag ) \ new_value = rhs; \ else \ new_value = old_value; \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return new_value; \ // ------------------------------------------------------------------------- #ifdef KMP_GOMP_COMPAT #define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \ if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT_CPT( OP, 0 ); \ } #else #define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------- #define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ /*TYPE old_value; */ \ temp_val = *lhs; \ old_value = temp_val; \ while ( old_value OP rhs && /* still need actions? */ \ ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ { \ KMP_CPU_PAUSE(); \ temp_val = *lhs; \ old_value = temp_val; \ } \ if( flag ) \ return rhs; \ else \ return old_value; \ } // ------------------------------------------------------------------------- // 1-byte, 2-byte operands - use critical section #define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value, old_value; \ if ( *lhs OP rhs ) { /* need actions? 
*/ \ GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \ MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \ } \ return *lhs; \ } #define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value, old_value; \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \ MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \ } \ return *lhs; \ } MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char, 8, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char, 8, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short, 16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short, 16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32, 32, <, 0 ) // __kmpc_atomic_fixed4_max_cpt MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32, 32, >, 0 ) // __kmpc_atomic_fixed4_min_cpt MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt #if KMP_HAVE_QUAD MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max_cpt MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min_cpt #if ( KMP_ARCH_X86 ) MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16_cpt MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 ) // 
__kmpc_atomic_float16_min_a16_cpt #endif #endif // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG) /* send assignment */ \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ ATOMIC_CMPXCHG_CPT( fixed1, neqv_cpt, kmp_int8, 8, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt ATOMIC_CMPXCHG_CPT( fixed2, neqv_cpt, kmp_int16, 16, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt ATOMIC_CMPXCHG_CPT( fixed4, neqv_cpt, kmp_int32, 32, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt ATOMIC_CMPXCHG_CPT( fixed8, neqv_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt, kmp_int8, 8, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt, kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt, kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt, kmp_int64, 64, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv_cpt // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------ // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. #define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) OP rhs; \ (*out) = (*lhs); \ } else { \ (*out) = (*lhs); \ (*lhs) OP rhs; \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_WRK( OP##=, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \ } // The end of workaround for cmplx4 /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // 
__kmpc_atomic_float10_mul_cpt ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt #endif #endif // routines for complex types // cmplx4 routines to return void ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt ATOMIC_CRITICAL_CPT( cmplx8, add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt ATOMIC_CRITICAL_CPT( cmplx8, sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt ATOMIC_CRITICAL_CPT( cmplx8, mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt ATOMIC_CRITICAL_CPT( cmplx8, div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 
20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt #if KMP_HAVE_QUAD ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt #endif #endif #if OMP_40_ENABLED // OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations. 
// Supported only on IA-32 architecture and Intel(R) 64
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// "Reverse" capture: the update is rhs OP *lhs (operands swapped), needed for
// non-commutative operators.  'flag' selects whether the NEW value (update
// first, then capture) or the OLD value (capture first) is returned.
#define OP_CRITICAL_CPT_REV(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if( flag ) { \
        /*temp_val = (*lhs);*/\
        (*lhs) = (rhs) OP (*lhs); \
        new_value = (*lhs); \
    } else { \
        new_value = (*lhs);\
        (*lhs) = (rhs) OP (*lhs); \
    } \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return new_value;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// In GOMP-compatibility mode (__kmp_atomic_mode == 2) fall back to the single
// global critical section (lock id 0) so libgomp-compiled code stays coherent.
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT_REV( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
// Lock-free variant: retry the CAS until *lhs is unchanged between the read
// and the store; KMP_CPU_PAUSE() is the spin-wait hint inside the retry loop.
#define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs OP old_value; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs OP old_value; \
        } \
        if( flag ) { \
            return new_value; \
        } else \
            return old_value; \
    }

// -------------------------------------------------------------------------
// Entry-point generator: CAS-based capture-reverse atomic for fixed-size types.
#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    TYPE KMP_ATOMIC_VOLATILE temp_val; \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
}

// Instantiations: only non-commutative ops (div, shl, shr, sub) need _rev.
ATOMIC_CMPXCHG_CPT_REV( fixed1, div_cpt_rev, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shl_cpt_rev, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shr_cpt_rev, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, sub_cpt_rev, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, div_cpt_rev, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shl_cpt_rev, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shr_cpt_rev, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, sub_cpt_rev, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, div_cpt_rev, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, shl_cpt_rev, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, shr_cpt_rev, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, sub_cpt_rev, kmp_int32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, div_cpt_rev, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, shl_cpt_rev, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, shr_cpt_rev, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, sub_cpt_rev, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4, div_cpt_rev, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4, sub_cpt_rev, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8, div_cpt_rev, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8, sub_cpt_rev, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev
//                             TYPE_ID,OP_ID, TYPE,       OP,  GOMP_FLAG
// ------------------------------------------------------------------------
//
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP      - operator
//     LCK_ID  - lock identifier, used to possibly distinguish lock variable
// Capture-reverse for types too wide for CAS: fall back to the per-type lock.
#define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    TYPE KMP_ATOMIC_VOLATILE temp_val; \
    /*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/\
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
    OP_CRITICAL_CPT_REV(OP,LCK_ID) \
}

/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev
#endif
#endif

// routines for complex types

// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
// Same old/new selection via 'flag' as OP_CRITICAL_CPT_REV, but the captured
// value goes out through *out and the function returns void.
#define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if( flag ) { \
        (*lhs) = (rhs) OP (*lhs); \
        (*out) = (*lhs); \
    } else { \
        (*out) = (*lhs); \
        (*lhs) = (rhs) OP (*lhs); \
    } \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
    OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \
    OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
}
// The end of workaround for cmplx4

// !!! TODO: check if we need to return void for cmplx4 routines
// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev

ATOMIC_CRITICAL_CPT_REV( cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev
#endif
#endif

// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}

#define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));

#define CRITICAL_SWP(LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    old_value = (*lhs); \
    (*lhs) = rhs; \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return old_value;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP(FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        CRITICAL_SWP( 0 ); \
    }
#else
#define GOMP_CRITICAL_SWP(FLAG)
#endif /* KMP_GOMP_COMPAT */

// Swap via single hardware exchange (integer types).
#define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \
    return old_value; \
}
// ------------------------------------------------------------------------
// Swap via single hardware exchange (floating-point bit patterns).
#define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \
    return old_value; \
}

// ------------------------------------------------------------------------
// Swap via CAS retry loop (for targets without a suitable direct exchange).
#define CMPXCHG_SWP(TYPE,BITS) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs; \
        while ( ! \
            KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs; \
        } \
        return old_value; \
    }

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    CMPXCHG_SWP(TYPE,BITS) \
}

ATOMIC_XCHG_SWP( fixed1, kmp_int8, 8, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp
ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp
ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp

ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp

// 64-bit swap: IA-32 has no 8-byte xchg, so use the CAS loop there.
#if ( KMP_ARCH_X86 )
    ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp
    ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#else
    ATOMIC_XCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp
    ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#endif

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
#define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    CRITICAL_SWP(LCK_ID) \
}

// ------------------------------------------------------------------------
// !!! TODO: check if we need to return void for cmplx4 routines
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));

#define CRITICAL_SWP_WRK(LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    tmp = (*lhs); \
    (*lhs) = (rhs); \
    (*out) = tmp; \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return;
// ------------------------------------------------------------------------

#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP_WRK(FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        CRITICAL_SWP_WRK( 0 ); \
    }
#else
#define GOMP_CRITICAL_SWP_WRK(FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------

#define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \
    TYPE tmp; \
    GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \
    CRITICAL_SWP_WRK(LCK_ID) \
}
// The end of workaround for cmplx4

ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp
#endif
// cmplx4 routine to return void
ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp

//ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp

ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp
ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp
ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp
#endif
#endif

// End of OpenMP 4.0 Capture

#endif //OMP_40_ENABLED

#endif
//KMP_ARCH_X86 || KMP_ARCH_X86_64

#undef OP_CRITICAL

/* ------------------------------------------------------------------------ */
/* Generic atomic routines                                                  */
/* ------------------------------------------------------------------------ */

// Generic 1-byte atomic: applies the compiler-supplied combiner 'f' to *lhs.
// Fast path: optimistic CAS retry loop on the raw byte.  Slow path (GOMP
// compat on IA-32): serialize through the 1-byte atomic lock.
void
__kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_global.init_serial );

    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE                   /* must use lock */
#else
        TRUE
#endif
        )
    {
        kmp_int8 old_value, new_value;

        old_value = *(kmp_int8 *) lhs;
        (*f)( &new_value, &old_value, rhs );

        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs,
                    *(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) )
        {
            KMP_CPU_PAUSE();

            old_value = *(kmp_int8 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }

        return;
    }
    else {
        //
        // All 1-byte data is of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid );

        (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid );
    }
}

// Generic 2-byte atomic; same scheme, plus an alignment check on non-x86
// targets before using the lock-free path.
// NOTE(review): unlike __kmpc_atomic_1/4/8, this routine has no
// KMP_DEBUG_ASSERT( __kmp_global.init_serial ) — confirm this is intentional.
void
__kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE                   /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
        TRUE                    /* no alignment problems */
#else
        ! ( (kmp_uintptr_t) lhs & 0x1)  /* make sure address is 2-byte aligned */
#endif
        )
    {
        kmp_int16 old_value, new_value;

        old_value = *(kmp_int16 *) lhs;
        (*f)( &new_value, &old_value, rhs );

        /* TODO: Should this be acquire or release? */
        while ( !
// #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid ); } } void __kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_global.init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */ #endif ) { kmp_int64 old_value, new_value; old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs, *(kmp_int64 *) &old_value, *(kmp_int64 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // Use __kmp_atomic_lock_8i for all 8-byte data, // even if it isn't of integer data type. 
// #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid ); } } void __kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_global.init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid ); } void __kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_global.init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid ); } void __kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_global.init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid 
); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid ); } void __kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_global.init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid ); } // AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler // duplicated in order to not use 3-party names in pure Intel code // TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin. void __kmpc_atomic_start(void) { int gtid = __kmp_entry_gtid(); KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid)); __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid); } void __kmpc_atomic_end(void) { int gtid = __kmp_get_gtid(); KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid)); __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid); } /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ /*! @} */ // end of file
conv3x3x3.h
// Copyright (c) 2017, The OctNet authors // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef OCTREE_CONV3X3X3_H #define OCTREE_CONV3X3X3_H #include "core.h" #define INV_FILTER_TRUE true #define INV_FILTER_FALSE false #define ADD_BIAS_TRUE true #define ADD_BIAS_FALSE false /// Get 0-padded data from an octree for given dense subscript indices. /// @deprecated /// /// @param grid_in /// @param n /// @param d /// @param h /// @param w /// @param f /// @return 0 if the subscript indices are out of bounds, otherwise the data. 
OCTREE_FUNCTION inline ot_data_t conv3x3x3_get_padded_data(const octree* grid_in, int n, int d, int h, int w, int f) { const bool in_vol = d >= 0 && h >= 0 && w >= 0 && d < 8 * grid_in->grid_depth && h < 8 * grid_in->grid_height && w < 8 * grid_in->grid_width; if(in_vol) { const int grid_idx = octree_grid_idx(grid_in, n, d / 8, h / 8, w / 8); // printf("%d,%d,%d => %d, %d\n", d, h, w, grid_idx, bit_idx); const ot_tree_t* tree = octree_get_tree(grid_in, grid_idx); const int bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8); /* ot_data_t* data_in = grid_in->data_ptrs[grid_idx]; */ ot_data_t* data_in = octree_get_data(grid_in, grid_idx); const int data_idx = tree_data_idx(tree, bit_idx, grid_in->feature_size); return (data_in + data_idx)[f]; } else { return 0; } } /// Applies a 3x3x3 convolution on a given position in grid grid_in. /// @deprecated /// /// @tparam inv_filter if filter weights should be transposed /// @param n /// @param ds /// @param hs /// @param ws /// @param grid_in /// @param weights convolution weights. /// @param channels_out number of output channels after convolution. /// @param factor scaling factor of weights. 
/// @param out convolution result template <bool inv_filter> OCTREE_FUNCTION inline void conv3x3x3_point(int n, int ds, int hs, int ws, const octree* grid_in, const ot_data_t* weights, int channels_out, float factor, ot_data_t* out) { const int channels_in = grid_in->feature_size; for(int kd = 0; kd < 3; ++kd) { for(int kh = 0; kh < 3; ++kh) { for(int kw = 0; kw < 3; ++kw) { int d, h, w; d = ds - 1 + kd; h = hs - 1 + kh; w = ws - 1 + kw; int k_idx; if(inv_filter) { k_idx = ((2 - kd) * 3 + (2 - kh)) * 3 + (2 - kw); } else { k_idx = (kd * 3 + kh) * 3 + kw; } for(int ci = 0; ci < channels_in; ++ci) { ot_data_t in = factor * conv3x3x3_get_padded_data(grid_in, n, d, h, w, ci); for(int co = 0; co < channels_out; ++co) { int weights_idx; if(inv_filter) { weights_idx = (ci * channels_out + co) * 3 * 3 * 3 + k_idx; } else { weights_idx = (co * channels_in + ci) * 3 * 3 * 3 + k_idx; } out[co] += weights[weights_idx] * in; } } } } } } /// Performs a 3x3x3 convolution along an octree cell surface. /// @deprecated /// /// @tparam inv_filter if filter weights should be transposed. /// @tparam add_bias if bias term should be added after convolution. /// @param bd1 start index of surface in shallow octree. /// @param bd2 end index of surface in shallow octree. /// @param bh1 start index of surface in shallow octree. /// @param bh2 end index of surface in shallow octree. /// @param bw1 start index of surface in shallow octree. /// @param bw2 end index of surface in shallow octree. /// @param n batch index. /// @param ds subscript index of tensor corresponding to shallow octree to locate surface. /// @param hs subscript index of tensor corresponding to shallow octree to locate surface. /// @param ws subscript index of tensor corresponding to shallow octree to locate surface. /// @param grid_in /// @param weights convolution weights. /// @param bias bias value. /// @param channels_out number of output channels after conv. /// @param factor scaling factor for convolution weights. 
/// @param out convolution result template <bool inv_filter, bool add_bias> OCTREE_FUNCTION inline void conv3x3x3_border(const int bd1, const int bd2, const int bh1, const int bh2, const int bw1, const int bw2, const int n, const int ds, const int hs, const int ws, const octree* grid_in, const ot_data_t* weights, const ot_data_t* bias, int channels_out, float factor, ot_data_t* out) { // weights \in R^{channels_out \times channels_in \times kd=3 \times kh=3 \times kw=3} // out \in R^{channels_out} int d, h, w; // Attention at start and end indices, so we convolve every border point only once d = ds + bd1; for(h = hs + bh1; h < hs + bh2; ++h) { for(w = ws + bw1; w < ws + bw2; ++w) { conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out); } } d = ds + bd2 - 1; for(h = hs + bh1; h < hs + bh2; ++h) { for(w = ws + bw1; w < ws + bw2; ++w) { conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out); } } h = hs + bh1; for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) { for(w = ws + bw1; w < ws + bw2; ++w) { conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out); } } h = hs + bh2 - 1; for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) { for(w = ws + bw1; w < ws + bw2; ++w) { conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out); } } w = ws + bw1; for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) { for(h = hs + bh1 + 1; h < hs + bh2 - 1; ++h) { conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out); } } w = ws + bw2 - 1; for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) { for(h = hs + bh1 + 1; h < hs + bh2 - 1; ++h) { conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out); } } if(add_bias) { ot_data_t bfactor = factor * (2 * (bh2 - bh1) * (bw2 - bw1) + 2 * (bd2 - bd1 - 2) * (bw2 - bw1) + 2 * (bd2 - bd1 - 2) * (bh2 - bh1 - 2)); for(int co = 0; co < channels_out; ++co) { out[co] += bfactor * bias[co]; } } } /// Performs 
the convolution on the constant part of the octree cell. /// @deprecated /// /// @tparam inv_filter if filter weights should be transposed. /// @tparam add_bias if bias term should be added after convolution. /// @param in_data constant input cell data. /// @param weights convolution weights. /// @param bias bias term. /// @param channels_in number of input channels for convolution. /// @param channels_out number of output channels for convolution. /// @param factor scaling factor for conv weights. /// @param out convolution result template <bool inv_filter, bool add_bias> OCTREE_FUNCTION inline void conv3x3x3_const(const ot_data_t* in_data, const ot_data_t* weights, const ot_data_t* bias, int channels_in, int channels_out, float factor, ot_data_t* out) { for(int kd = 0; kd < 3; ++kd) { for(int kh = 0; kh < 3; ++kh) { for(int kw = 0; kw < 3; ++kw) { int k_idx = (kd * 3 + kh) * 3 + kw; for(int ci = 0; ci < channels_in; ++ci) { ot_data_t in = factor * in_data[ci]; for(int co = 0; co < channels_out; ++co) { int weights_idx; if(inv_filter) { weights_idx = (ci * channels_out + co) * 3 * 3 * 3 + k_idx; } else { weights_idx = (co * channels_in + ci) * 3 * 3 * 3 + k_idx; } out[co] += weights[weights_idx] * in; } } } } } if(add_bias) { for(int co = 0; co < channels_out; ++co) { out[co] += factor * bias[co]; // printf(" b %f * %f => %f\n", factor, bias[co], out[co]); } } } /// Backward function for the convolution of a single point in the grid-octree /// data structure. /// Computes the gradient wrt. to the weights. /// @deprecated /// /// @param n /// @param ds subscript index of corresponding tensor. /// @param hs subscript index of corresponding tensor. /// @param ws subscript index of corresponding tensor. /// @param grid_in input to the convolution operation. /// @param grad_out gradient of the successive operation. /// @param channels_out number of output channels of the convolution operation. /// @param scale scale factor for the gradient update. 
/// @param grad_weights resulting gradient of convolution weights. OCTREE_FUNCTION inline void conv3x3x3_point_wbwd(int n, int ds, int hs, int ws, const octree* grid_in, const ot_data_t* grad_out, int channels_out, ot_data_t scale, ot_data_t* grad_weights) { const int channels_in = grid_in->feature_size; for(int kd = 0; kd < 3; ++kd) { for(int kh = 0; kh < 3; ++kh) { for(int kw = 0; kw < 3; ++kw) { int d, h, w; d = ds - 1 + kd; h = hs - 1 + kh; w = ws - 1 + kw; int k_idx = (kd * 3 + kh) * 3 + kw; for(int ci = 0; ci < channels_in; ++ci) { ot_data_t in = conv3x3x3_get_padded_data(grid_in, n, d, h, w, ci); for(int co = 0; co < channels_out; ++co) { int weights_idx; weights_idx = (co * channels_in + ci) * 3 * 3 * 3 + k_idx; ot_data_t val = scale * grad_out[co] * in; #if defined(__CUDA_ARCH__) atomicAdd(grad_weights + weights_idx, val); #elif defined(_OPENMP) #pragma omp atomic grad_weights[weights_idx] += val; #else grad_weights[weights_idx] += val; #endif } } } } } } /// Backward function for the convolution along a octree cell surface. /// Computes the gradient wrt. to the weights. /// @deprecated /// /// @param bd1 start index of surface in shallow octree. /// @param bd2 end index of surface in shallow octree. /// @param bh1 start index of surface in shallow octree. /// @param bh2 end index of surface in shallow octree. /// @param bw1 start index of surface in shallow octree. /// @param bw2 end index of surface in shallow octree. /// @param n batch index. /// @param ds subscript index of tensor corresponding to shallow octree to locate surface. /// @param hs subscript index of tensor corresponding to shallow octree to locate surface. /// @param ws subscript index of tensor corresponding to shallow octree to locate surface. /// @param grid_in octree input to the convolution. /// @param grad_out gradient of the successive operation. /// @param channels_out number of output channels of the convolution. /// @param scale scale factor of the gradient update. 
/// @param grad_weights resulting gradient weights.
/// @param grad_bias resulting gradient bias.
OCTREE_FUNCTION
inline void conv3x3x3_border_wbwd(const int bd1, const int bd2, const int bh1, const int bh2, const int bw1, const int bw2, const int n, const int ds, const int hs, const int ws, const octree* grid_in, const ot_data_t* grad_out, int channels_out, ot_data_t scale, ot_data_t* grad_weights, ot_data_t* grad_bias) {
  int d, h, w;
  // Attention at start and end indices, so we convolve every border point only once
  // (same six-face traversal with shrinking index ranges as conv3x3x3_border,
  // so each edge/corner voxel contributes exactly once).

  // front d-face: full h x w extent
  d = ds + bd1;
  for(h = hs + bh1; h < hs + bh2; ++h) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }
  // back d-face: full h x w extent
  d = ds + bd2 - 1;
  for(h = hs + bh1; h < hs + bh2; ++h) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }
  // low h-face: d interior only, full w
  h = hs + bh1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }
  // high h-face: d interior only, full w
  h = hs + bh2 - 1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }
  // low w-face: d and h interior only
  w = ws + bw1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(h = hs + bh1 + 1; h < hs + bh2 - 1; ++h) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }
  // high w-face: d and h interior only
  w = ws + bw2 - 1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(h = hs + bh1 + 1; h < hs + bh2 - 1; ++h) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }

  // Bias gradient: grad_out[co] would be accumulated once per border voxel,
  // so scale by the number of distinct border voxels instead of looping.
  ot_data_t factor = scale * (2 * (bh2 - bh1) * (bw2 - bw1) + 2 * (bd2 - bd1 - 2) * (bw2 - bw1) + 2 * (bd2 - bd1 - 2) * (bh2 - bh1 - 2));
  for(int co = 0; co < channels_out; ++co) {
    ot_data_t val = factor * grad_out[co];
#if defined(__CUDA_ARCH__)
    atomicAdd(grad_bias + co, val);
#elif defined(_OPENMP)
    #pragma omp atomic
    grad_bias[co] += val;
#else
    grad_bias[co] += val;
#endif
  }
}

/// Backward function for the constant convolution.
/// @deprecated
///
/// @param in_data constant input cell data.
/// @param grad_out the gradient of the successive operation.
/// @param channels_in the number of input channels
/// @param channels_out number of output channels for convolution.
/// @param factor scale factor of the gradient update.
/// @param grad_weights resulting gradient weights.
/// @param grad_bias resulting gradient bias.
OCTREE_FUNCTION
inline void conv3x3x3_const_wbwd(const ot_data_t* in_data, const ot_data_t* grad_out, int channels_in, int channels_out, float factor, ot_data_t* grad_weights, ot_data_t* grad_bias) {
  // Weight gradient: every one of the 27 taps saw the same constant input,
  // so each tap receives the same grad_out[co] * factor * in_data[ci] update.
  for(int kd = 0; kd < 3; ++kd) {
    for(int kh = 0; kh < 3; ++kh) {
      for(int kw = 0; kw < 3; ++kw) {
        int k_idx = (kd * 3 + kh) * 3 + kw;
        for(int ci = 0; ci < channels_in; ++ci) {
          ot_data_t in = factor * in_data[ci];
          for(int co = 0; co < channels_out; ++co) {
            int weights_idx;
            // grad_weights layout: [channels_out][channels_in][27]
            weights_idx = (co * channels_in + ci) * 3 * 3 * 3 + k_idx;
            ot_data_t val = grad_out[co] * in;
#if defined(__CUDA_ARCH__)
            atomicAdd(grad_weights + weights_idx, val);
#elif defined(_OPENMP)
            #pragma omp atomic
            grad_weights[weights_idx] += val;
#else
            grad_weights[weights_idx] += val;
#endif
          }
        }
      }
    }
  }

  // Bias gradient (atomic: multiple cells may update the same bias entry).
  for(int co = 0; co < channels_out; ++co) {
    ot_data_t val = factor * grad_out[co];
#if defined(__CUDA_ARCH__)
    atomicAdd(grad_bias + co, val);
#elif defined(_OPENMP)
    #pragma omp atomic
    grad_bias[co] += val;
#else
    grad_bias[co] += val;
#endif
  }
}

#endif
atomic-10.c
/* PR middle-end/28046 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
/* { dg-require-effective-target cas_int } */

/* Regression test: each of the four "#pragma omp atomic" increments below
   (array element, scalar, struct member, dereferenced pointer) must be
   expanded to a __atomic_fetch_add call rather than a compare-and-swap
   loop; the dg-final directive counts exactly four such calls.  Do not
   change the statement forms -- they are what is being tested.  */

int a[3], b;
struct C { int x; int y; } c;
int bar (void), *baz (void);

void
foo (void)
{
#pragma omp atomic
  a[2] += bar ();
#pragma omp atomic
  b += bar ();
#pragma omp atomic
  c.y += bar ();
#pragma omp atomic
  *baz () += bar ();
}

/* { dg-final { scan-tree-dump-times "__atomic_fetch_add" 4 "ompexp" } } */
GB_unop__identity_int32_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated file -- any fix belongs in the Generator/
// template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_int32_int64)
// op(A') function: GB (_unop_tran__identity_int32_int64)

// C type: int32_t
// A type: int64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int32_int64)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int32_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands using the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__log10_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): auto-generated file -- any fix belongs in the Generator/
// template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__log10_fp64_fp64)
// op(A') function: GB (_unop_tran__log10_fp64_fp64)

// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = log10 (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = log10 (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = log10 (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG10 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log10_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // all entries present; identity-with-no-typecast ops compile to a
        // plain memcpy (dead branch here since the macro is 0 for log10)
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log10 (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log10 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__log10_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands using the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
depthwise_conv2d.h
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MACE_KERNELS_DEPTHWISE_CONV2D_H_
#define MACE_KERNELS_DEPTHWISE_CONV2D_H_

#if defined(MACE_ENABLE_NEON) && defined(__aarch64__)
#include <arm_neon.h>
#endif
#include <algorithm>
#include <memory>
#include <vector>

// We reuse TensorFlow Lite's optimized depthwiseconv_uint8 and parallelized it
// using OpenMP for MACE's quantized depthwise_conv2d.
#include "tensorflow/contrib/lite/kernels/internal/optimized/depthwiseconv_uint8.h"

#include "mace/core/future.h"
#include "mace/kernels/conv_pool_2d_util.h"
#include "mace/kernels/activation.h"
#include "mace/kernels/arm/depthwise_conv2d_neon.h"
#include "mace/kernels/quantize.h"
#include "mace/public/mace.h"

#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif  // MACE_ENABLE_OPENCL

namespace mace {
namespace kernels {

// Common state shared by all depthwise conv2d functors: strides, padding,
// dilations and the fused activation configuration.
struct DepthwiseConv2dFunctorBase : OpKernel {
  DepthwiseConv2dFunctorBase(OpKernelContext *context,
                             const int *strides,
                             const Padding padding_type,
                             const std::vector<int> &paddings,
                             const int *dilations,
                             const ActivationType activation,
                             const float relux_max_limit)
      : OpKernel(context),
        strides_(strides),
        padding_type_(padding_type),
        paddings_(paddings),
        dilations_(dilations),
        activation_(activation),
        relux_max_limit_(relux_max_limit) {}

  const int *strides_;    // [stride_h, stride_w]
  const Padding padding_type_;
  std::vector<int> paddings_;
  const int *dilations_;  // [dilation_h, dilation_w]
  const ActivationType activation_;
  const float relux_max_limit_;
};

template<DeviceType D, typename T>
struct DepthwiseConv2dFunctor;

// Float CPU implementation (NCHW layout); dispatches to NEON kernels for the
// common 3x3 s1/s2 cases and to a generic scalar loop otherwise.
template<>
struct DepthwiseConv2dFunctor<DeviceType::CPU, float>
    : public DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctor(OpKernelContext *context,
                         const int *strides,
                         const Padding padding_type,
                         const std::vector<int> &paddings,
                         const int *dilations,
                         const ActivationType activation,
                         const float relux_max_limit)
      : DepthwiseConv2dFunctorBase(context,
                                   strides,
                                   padding_type,
                                   paddings,
                                   dilations,
                                   activation,
                                   relux_max_limit) {}

  // Generic scalar depthwise conv for NCHW float data; used when no NEON
  // fast path applies (arbitrary filter size, stride, or dilation).
  void DepthwiseConv2dGeneral(const float *input,
                              const float *filter,
                              const index_t *in_shape,
                              const index_t *out_shape,
                              const index_t *filter_shape,
                              const int *stride_hw,
                              const int *dilation_hw,
                              const int *pad_hw,
                              float *output) {
    // filter_shape is [multiplier * in_channels, in_channels, kh, kw]
    const index_t multiplier = filter_shape[0] / filter_shape[1];
#pragma omp parallel for collapse(2)
    for (index_t b = 0; b < in_shape[0]; ++b) {
      for (index_t m = 0; m < filter_shape[0]; ++m) {
        for (index_t h = 0; h < out_shape[2]; ++h) {
          for (index_t w = 0; w < out_shape[3]; ++w) {
            const index_t out_channels = filter_shape[0];
            const index_t in_channels = filter_shape[1];
            const index_t filter_height = filter_shape[2];
            const index_t filter_width = filter_shape[3];
            const index_t in_height = in_shape[2];
            const index_t in_width = in_shape[3];
            const index_t out_height = out_shape[2];
            const index_t out_width = out_shape[3];
            index_t out_offset =
                ((b * out_channels + m) * out_height + h) * out_width + w;
            // output channel m maps to input channel c and multiplier slot o
            index_t c = m / multiplier;
            index_t o = m % multiplier;
            float sum = 0;
            for (index_t kh = 0; kh < filter_height; ++kh) {
              for (index_t kw = 0; kw < filter_width; ++kw) {
                index_t ih = h * stride_hw[0] + kh * dilation_hw[0] - pad_hw[0];
                index_t iw = w * stride_hw[1] + kw * dilation_hw[1] - pad_hw[1];
                // zero padding: taps outside the input contribute nothing
                if (ih >= 0 && ih < in_height && iw >= 0 && iw < in_width) {
                  index_t in_offset =
                      ((b * in_channels + c) * in_height + ih) * in_width + iw;
                  index_t filter_offset =
                      (((o * in_channels) + c) * filter_height + kh)
                          * filter_width + kw;
                  sum += input[in_offset] * filter[filter_offset];
                }
              }
            }
            output[out_offset] = sum;
          }
        }
      }
    }
  }

  // Runs the depthwise convolution: computes padding/output shape, picks a
  // NEON or generic kernel, then applies bias and the fused activation.
  MaceStatus operator()(const Tensor *input,   // NCHW
                        const Tensor *filter,  // OIHW
                        const Tensor *bias,
                        Tensor *output,        // NCHW
                        StatsFuture *future) {
    MACE_UNUSED(future);
    MACE_CHECK_NOTNULL(input);
    MACE_CHECK_NOTNULL(filter);
    MACE_CHECK_NOTNULL(output);
    std::vector<index_t> output_shape(4);
    std::vector<int> paddings(2);
    // expand filter dim 0 to multiplier * in_channels for shape computation
    std::vector<index_t> filter_shape
        {filter->dim(0) * filter->dim(1), filter->dim(1),
         filter->dim(2), filter->dim(3)};

    if (paddings_.empty()) {
      CalcNCHWPaddingAndOutputSize(input->shape().data(),
                                   filter_shape.data(),
                                   dilations_,
                                   strides_,
                                   padding_type_,
                                   output_shape.data(),
                                   paddings.data());
    } else {
      paddings = paddings_;
      CalcNCHWOutputSize(input->shape().data(),
                         filter_shape.data(),
                         paddings_.data(),
                         dilations_,
                         strides_,
                         RoundType::FLOOR,
                         output_shape.data());
    }
    MACE_RETURN_IF_ERROR(output->Resize(output_shape));
    output->Clear();

    index_t batch = output->dim(0);
    index_t channels = output->dim(1);
    index_t height = output->dim(2);
    index_t width = output->dim(3);

    index_t input_batch = input->dim(0);
    index_t input_channels = input->dim(1);
    index_t input_height = input->dim(2);
    index_t input_width = input->dim(3);

    index_t filter_h = filter_shape[2];
    index_t filter_w = filter_shape[3];
    MACE_CHECK(filter_shape[0] == channels, filter_shape[0], " != ", channels);
    MACE_CHECK(filter_shape[1] == input_channels, filter_shape[1], " != ",
               input_channels);

    index_t stride_h = strides_[0];
    index_t stride_w = strides_[1];

    index_t dilation_h = dilations_[0];
    index_t dilation_w = dilations_[1];

    MACE_CHECK(batch == input_batch, "Input/Output batch size mismatch");

    // split total padding between the two sides (extra pixel goes to
    // bottom/right)
    int pad_top = paddings[0] >> 1;
    int pad_bottom = paddings[0] - pad_top;
    int pad_left = paddings[1] >> 1;
    int pad_right = paddings[1] - pad_left;

    // output rows/cols whose filter window lies fully inside the input;
    // the NEON kernels use these to skip bounds checks in the interior
    index_t valid_h_start = pad_top == 0 ? 0 : (pad_top - 1) / stride_h + 1;
    index_t valid_h_stop = pad_bottom == 0
                           ? height
                           : height - ((pad_bottom - 1) / stride_h + 1);
    index_t valid_w_start = pad_left == 0 ? 0 : (pad_left - 1) / stride_w + 1;
    index_t valid_w_stop = pad_right == 0
                           ? width
                           : width - ((pad_right - 1) / stride_w + 1);

    std::function<void(const float *input, float *output)> conv_func;

    Tensor::MappingGuard input_guard(input);
    Tensor::MappingGuard filter_guard(filter);
    Tensor::MappingGuard bias_guard(bias);
    Tensor::MappingGuard output_guard(output);
    auto input_data = input->data<float>();
    auto filter_data = filter->data<float>();
    auto bias_data = bias == nullptr ? nullptr : bias->data<float>();
    auto output_data = output->mutable_data<float>();

    const int pad_hw[2] = {pad_top, pad_left};
    const index_t input_shape[4] =
        {batch, input_channels, input_height, input_width};

    // make host compiler happy
    MACE_UNUSED(pad_hw);
    MACE_UNUSED(input_shape);

    if (filter_h == 3 && filter_w == 3 && stride_h == 1 && stride_w == 1
        && dilation_h == 1 && dilation_w == 1) {
      // NEON fast path: 3x3, stride 1
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dNeonK3x3S1(input,
                                  filter_data,
                                  input_shape,
                                  output_shape.data(),
                                  pad_hw,
                                  valid_h_start,
                                  valid_h_stop,
                                  valid_w_start,
                                  valid_w_stop,
                                  output);
      };
    } else if (filter_h == 3 && filter_w == 3 && stride_h == 2 && stride_w == 2
        && dilation_h == 1 && dilation_w == 1) {
      // NEON fast path: 3x3, stride 2
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dNeonK3x3S2(input,
                                  filter_data,
                                  input_shape,
                                  output_shape.data(),
                                  pad_hw,
                                  valid_h_start,
                                  valid_h_stop,
                                  valid_w_start,
                                  valid_w_stop,
                                  output);
      };
    } else {
      // generic fallback for any filter size / stride / dilation
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dGeneral(input,
                               filter_data,
                               input_shape,
                               output_shape.data(),
                               filter_shape.data(),
                               strides_,
                               dilations_,
                               pad_hw,
                               output);
      };
    }

    conv_func(input_data, output_data);

    if (bias_data != nullptr) {
#pragma omp parallel for collapse(2)
      for (index_t b = 0; b < batch; ++b) {
        for (index_t c = 0; c < channels; ++c) {
          for (index_t i = 0; i < height * width; ++i) {
            output_data[(b * channels + c) * height * width + i] +=
                bias_data[c];
          }
        }
      }
    }

    DoActivation(output_data, output_data, output->size(), activation_,
                 relux_max_limit_);

    return MACE_SUCCESS;
  }
};

// Quantized (uint8, NHWC) CPU implementation; uses TFLite's optimized kernel
// when dilation is 1, otherwise a generic scalar loop.
template<>
struct DepthwiseConv2dFunctor<DeviceType::CPU, uint8_t>
    : public DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctor(OpKernelContext *context,
                         const int *strides,
                         const Padding padding_type,
                         const std::vector<int> &paddings,
                         const int *dilations,
                         const ActivationType activation,
                         const float relux_max_limit)
      : DepthwiseConv2dFunctorBase(context,
                                   strides,
                                   padding_type,
                                   paddings,
                                   dilations,
                                   activation,
                                   relux_max_limit) {}

  // Generic quantized depthwise conv (NHWC, HWIM filter layout); accumulates
  // in int32 around the zero points, then requantizes and clamps to [0, 255].
  void DepthwiseConv2dGeneral(const uint8_t *input,
                              const uint8_t *filter,
                              const int32_t *bias,
                              const index_t *in_shape,
                              const index_t *out_shape,
                              const index_t *filter_shape,
                              const int32_t input_zero,
                              const int32_t filter_zero,
                              const int32_t output_zero,
                              const float output_multiplier,
                              const int *stride_hw,
                              const int *dilation_hw,
                              const int *pad_hw,
                              uint8_t *output) {
#pragma omp parallel for collapse(2)
    for (index_t b = 0; b < out_shape[0]; ++b) {
      for (index_t h = 0; h < out_shape[1]; ++h) {
        for (index_t w = 0; w < out_shape[2]; ++w) {
          for (index_t m = 0; m < out_shape[3]; ++m) {
            const index_t filter_height = filter_shape[0];
            const index_t filter_width = filter_shape[1];
            const index_t in_channels = filter_shape[2];
            const index_t depth_multiplier = filter_shape[3];
            const index_t in_height = in_shape[1];
            const index_t in_width = in_shape[2];
            const index_t out_height = out_shape[1];
            const index_t out_width = out_shape[2];
            const index_t out_channels = out_shape[3];
            index_t out_offset =
                ((b * out_height + h) * out_width + w) * out_channels + m;
            // output channel m maps to input channel c and multiplier slot o
            index_t c = m / depth_multiplier;
            index_t o = m % depth_multiplier;
            index_t ih_base = h * stride_hw[0] - pad_hw[0];
            index_t iw_base = w * stride_hw[1] - pad_hw[1];
            int32_t sum = 0;
            for (index_t kh = 0; kh < filter_height; ++kh) {
              const index_t ih = ih_base + kh * dilation_hw[0];
              for (index_t kw = 0; kw < filter_width; ++kw) {
                const index_t iw = iw_base + kw * dilation_hw[1];
                if (ih >= 0 && ih < in_height && iw >= 0 && iw < in_width) {
                  index_t in_offset =
                      ((b * in_height + ih) * in_width + iw) * in_channels + c;
                  index_t filter_offset =
                      ((kh * filter_width + kw) * in_channels + c)
                          * depth_multiplier + o;
                  // accumulate with zero points removed
                  sum += (input[in_offset] - input_zero)
                      * (filter[filter_offset] - filter_zero);
                }
              }
            }
            if (bias) {
              sum += bias[m];
            }
            // requantize to the output scale and clamp to uint8 range
            sum = static_cast<int32_t>(std::round(sum * output_multiplier));
            sum += output_zero;
            output[out_offset] =
                static_cast<uint8_t>(std::min(255, std::max(0, sum)));
          }
        }
      }
    }
  }

  // Converts a MACE shape to TFLite's reversed Dims<4> representation,
  // padding missing leading dims with size 1.
  inline tflite::Dims<4> ShapeToTfliteDims(const std::vector<index_t> &shape) {
    tflite::Dims<4> d;
    for (int i = 0; i < 4; ++i) {
      int src = static_cast<int>(shape.size() - i - 1);
      if (src >= 0) {
        d.sizes[i] = shape[src];
      } else {
        d.sizes[i] = 1;
      }
    }
    d.strides[0] = 1;
    for (int i = 1; i < 4; i++) {
      d.strides[i] = d.strides[i - 1] * d.sizes[i - 1];
    }
    return d;
  }

  // Runs the quantized depthwise convolution; note the fused activation is
  // not applied here (unlike the float path) -- clamping to [0,255] is done
  // by the kernels themselves.
  MaceStatus operator()(const Tensor *input,   // NHWC
                        const Tensor *filter,  // HWIM
                        const Tensor *bias,
                        Tensor *output,        // NHWC
                        StatsFuture *future) {
    MACE_UNUSED(future);
    MACE_CHECK_NOTNULL(input);
    MACE_CHECK_NOTNULL(filter);
    MACE_CHECK_NOTNULL(output);

    std::vector<index_t> output_shape(4);
    std::vector<int> paddings(2);
    // reuse OHWI format, only for calculating output
    std::vector<index_t> ohwi_shape{
        filter->dim(2) * filter->dim(3), filter->dim(0), filter->dim(1), 1};
    if (paddings_.empty()) {
      CalcPaddingAndOutputSize(input->shape().data(),
                               NHWC,
                               ohwi_shape.data(),
                               OHWI,
                               dilations_,
                               strides_,
                               padding_type_,
                               output_shape.data(),
                               paddings.data());
    } else {
      paddings = paddings_;
      CalcOutputSize(input->shape().data(),
                     NHWC,
                     ohwi_shape.data(),
                     OHWI,
                     paddings_.data(),
                     dilations_,
                     strides_,
                     RoundType::FLOOR,
                     output_shape.data());
    }
    MACE_RETURN_IF_ERROR(output->Resize(output_shape));
    output->Clear();

    MACE_CHECK(output->dim(0) == input->dim(0),
               "Input/Output batch size mismatch");
    MACE_CHECK(filter->dim(2) == input->dim(3), filter->dim(2), " != ",
               input->dim(3));

    index_t out_channels = output_shape[3];
    index_t stride_h = strides_[0];
    index_t stride_w = strides_[1];
    index_t dilation_h = dilations_[0];
    index_t dilation_w = dilations_[1];
    int pad_top = paddings[0] >> 1;
    int pad_left = paddings[1] >> 1;

    Tensor::MappingGuard input_guard(input);
    Tensor::MappingGuard filter_guard(filter);
    Tensor::MappingGuard bias_guard(bias);
    Tensor::MappingGuard output_guard(output);
    auto input_data = input->data<uint8_t>();
    auto filter_data = filter->data<uint8_t>();
    auto output_data = output->mutable_data<uint8_t>();

    if (dilation_h == 1 && dilation_w == 1) {
      // TFLite optimized path (requires a bias tensor: substitute zeros)
      std::vector<index_t> bias_shape{out_channels};
      std::unique_ptr<Tensor> zero_bias;
      const int32_t *bias_data = nullptr;
      if (bias == nullptr) {
        zero_bias.reset(new Tensor(GetCPUAllocator(), DT_INT32));
        zero_bias->Resize(bias_shape);
        zero_bias->Clear();
        bias_data = zero_bias->data<int32_t>();
      } else {
        bias_data = bias->data<int32_t>();
      }

      int32_t quantized_multiplier;
      int32_t right_shift;
      GetOutputMultiplierAndShift(input->scale(), filter->scale(),
                                  output->scale(), &quantized_multiplier,
                                  &right_shift);
      // 1HWO
      std::vector<index_t> filter_shape{
          1, filter->dim(0), filter->dim(1), filter->dim(2) * filter->dim(3)};
      tflite::optimized_ops::DepthwiseConv(
          input_data, ShapeToTfliteDims(input->shape()), -input->zero_point(),
          filter_data, ShapeToTfliteDims(filter_shape), -filter->zero_point(),
          bias_data, ShapeToTfliteDims(bias_shape),
          stride_w, stride_h, pad_left, pad_top,
          filter->dim(3),
          output->zero_point(), quantized_multiplier, right_shift,
          0, 255,
          output_data, ShapeToTfliteDims(output->shape()));
    } else {
      // generic path: dilated quantized depthwise conv
      auto bias_data = bias == nullptr ? nullptr : bias->data<int32_t>();
      float output_multiplier =
          input->scale() * filter->scale() / output->scale();
      const int pad_hw[2] = {pad_top, pad_left};
      DepthwiseConv2dGeneral(input_data,
                             filter_data,
                             bias_data,
                             input->shape().data(),
                             output_shape.data(),
                             filter->shape().data(),
                             input->zero_point(),
                             filter->zero_point(),
                             output->zero_point(),
                             output_multiplier,
                             strides_,
                             dilations_,
                             pad_hw,
                             output_data);
    }

    return MACE_SUCCESS;
  }
};

#ifdef MACE_ENABLE_OPENCL
// Interface implemented by the OpenCL depthwise conv2d kernels.
class OpenCLDepthwiseConv2dKernel {
 public:
  virtual MaceStatus Compute(
      OpKernelContext *context,
      const Tensor *input,
      const Tensor *filter,
      const Tensor *bias,
      const int *strides,
      const Padding &padding_type,
      const std::vector<int> &padding_data,
      const int *dilations,
      const ActivationType activation,
      const float relux_max_limit,
      Tensor *output,
      StatsFuture *future) = 0;
  MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLDepthwiseConv2dKernel);
};

// GPU functor: forwards to an OpenCL kernel implementation.
template<typename T>
struct DepthwiseConv2dFunctor<DeviceType::GPU, T>
    : DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctor(OpKernelContext *context,
                         const int *strides,
                         const Padding padding_type,
                         const std::vector<int> &paddings,
                         const int *dilations,
                         const ActivationType activation,
                         const float relux_max_limit);

  MaceStatus operator()(const Tensor *input,
                        const Tensor *filter,
                        const Tensor *bias,
                        Tensor *output,
                        StatsFuture *future);

  std::unique_ptr<OpenCLDepthwiseConv2dKernel> kernel_;
};
#endif  // MACE_ENABLE_OPENCL

}  // namespace kernels
}  // namespace mace

#endif  // MACE_KERNELS_DEPTHWISE_CONV2D_H_
test2.c
/* Test fixture: a main() whose body is a sequence of numbered no-op
   expression statements (0;, 2;, 3; ...) interleaved with OpenMP barriers.
   The integer statements appear to label program points for a
   barrier-matching / concurrency analysis tool -- TODO confirm against the
   tool that consumes this file.  Do not "clean up" the dead branches or
   constant conditions; they are the test input. */
int main() {
  int x;
#pragma omp parallel
  {
    x = 0;
    0;
    if (1) {
      2;
#pragma omp barrier
      /* read of shared x at a labeled point between barriers */
      x;
      3;
    } else {
      /* statically dead branch (condition is the constant 1) containing a
         barrier inside a loop */
      4;
      while (5) {
        6;
#pragma omp barrier
        7;
      }
      8;
    }
    9;
#pragma omp barrier
    10;
  }
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) { for (t4=max(max(max(0,ceild(t1-1023,1024)),ceild(4*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t2+Nx,2048),floord(Nt+Nx-4,2048)),floord(2*t1+Nx+1,2048)),floord(8*t3+Nx+4,2048)),floord(4*t1-4*t2+Nz+Nx-1,2048));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),2048*t4+2046),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON 
#pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/resize.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *oriented;

  /*
    Validate arguments, then dispatch on the EXIF-style orientation tag.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  oriented=(Image *) NULL;
  switch (orientation)
  {
    case TopRightOrientation:
    {
      oriented=FlopImage(image,exception);
      break;
    }
    case BottomRightOrientation:
    {
      oriented=RotateImage(image,180.0,exception);
      break;
    }
    case BottomLeftOrientation:
    {
      oriented=FlipImage(image,exception);
      break;
    }
    case LeftTopOrientation:
    {
      oriented=TransposeImage(image,exception);
      break;
    }
    case RightTopOrientation:
    {
      oriented=RotateImage(image,90.0,exception);
      break;
    }
    case RightBottomOrientation:
    {
      oriented=TransverseImage(image,exception);
      break;
    }
    case LeftBottomOrientation:
    {
      oriented=RotateImage(image,270.0,exception);
      break;
    }
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
    {
      /*
        Already upright (or unknown): return an unmodified copy.
      */
      oriented=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
  }
  if (oriented != (Image *) NULL)
    oriented->orientation=TopLeftOrientation;
  return(oriented);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C h o p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChopImage() removes a region of an image and collapses the image to occupy
%  the removed portion.
%
%  The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o chop_info: Define the region of the image to chop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clamp the requested rectangle so it lies entirely inside the image.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /*
    Result image is the original size minus the chopped rectangle.
  */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: first copy the rows above the chopped band,
    skipping the chopped columns within each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict chop_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* NOTE(review): image_view is a *virtual* view, yet the Authentic index
       queue is fetched for it here (CropImage below uses
       GetCacheViewVirtualIndexQueue for its source) -- confirm upstream. */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Keep only columns outside the chopped [extent.x,extent.x+width) band. */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: now copy the rows below the chopped band,
    shifting them up by extent.height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict chop_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  C o n s o l i d a t e   C M Y K   I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a
%  single
image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
    Each group of four consecutive list entries becomes one CMYK image;
    plane intensities are inverted (QuantumRange - intensity) on merge.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
  {
    cmyk_image=CloneImage(images,0,0,MagickTrue,exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace);
    /*
      Plane 1: cyan (stored in the red channel).
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Plane 2: magenta (green channel).
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Plane 3: yellow (blue channel).
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Plane 4: black (stored in the index channel).
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register IndexPacket
        *magick_restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
          GetPixelIntensity(images,p)));
        p++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    AppendImageToList(&cmyk_images,cmyk_image);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
  }
  return(cmyk_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C r o p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImage() extracts a region of the image starting at the offset defined
%  by geometry.  Region must be fully defined, and no special handling of
%  geometry flags is performed.
%
%  The format of the CropImage method is:
%
%      Image *CropImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to crop with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Interpret the crop request relative to the image's virtual canvas
    (page); a zero-sized page falls back to the pixel dimensions.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the request into pixel coordinates, clipping to the
    actual pixel data on each axis.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t)
       image->page.width) ||
      ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t)
       image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy each source row (with its colormap indexes, if any)
    into the destination row by row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict crop_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
    (void) memcpy(q,p,(size_t) crop_image->columns*sizeof(*p));
    if ((indexes != (IndexPacket *) NULL) &&
        (crop_indexes != (IndexPacket *) NULL))
      (void) memcpy(crop_indexes,indexes,(size_t) crop_image->columns*
        sizeof(*crop_indexes));
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C r o p I m a g e T o T i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() crops a single image, into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%         const RectangleInfo *crop_geometry, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Round the fraction to nearest integer (half-way cases round up). */
static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).  geometry.width/height give the tile
        counts; delta is the (fractional) tile pitch in pixels.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* Geometry already covers the whole image: return a plain copy. */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E x c e r p t I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (excerpt_image == (Image *) NULL) return((Image *) NULL); /* Excerpt each row. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,excerpt_image,excerpt_image->rows,1) #endif for (y=0; y < (ssize_t) excerpt_image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict excerpt_indexes, *magick_restrict indexes; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } (void) memcpy(q,p,(size_t) excerpt_image->columns*sizeof(*q)); indexes=GetCacheViewAuthenticIndexQueue(image_view); if (indexes != (IndexPacket *) NULL) { excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view); if (excerpt_indexes != (IndexPacket *) NULL) (void) memcpy(excerpt_indexes,indexes,(size_t) excerpt_image->columns*sizeof(*excerpt_indexes)); } if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif 
progress++; proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } excerpt_view=DestroyCacheView(excerpt_view); image_view=DestroyCacheView(image_view); excerpt_image->type=image->type; if (status == MagickFalse) excerpt_image=DestroyImage(excerpt_image); return(excerpt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtentImage() extends the image as defined by the geometry, gravity, and % image background color. Set the (x,y) offset of the geometry to move the % original image relative to the extended image. % % The format of the ExtentImage method is: % % Image *ExtentImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExtentImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { Image *extent_image; MagickBooleanType status; /* Allocate extent image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (extent_image == (Image *) NULL) return((Image *) NULL); status=SetImageBackgroundColor(extent_image); if (status == MagickFalse) { InheritException(exception,&extent_image->exception); extent_image=DestroyImage(extent_image); return((Image *) NULL); } status=CompositeImage(extent_image,image->compose,image,-geometry->x, -geometry->y); if (status == MagickFalse) { InheritException(exception,&extent_image->exception); extent_image=DestroyImage(extent_image); return((Image *) NULL); } return(extent_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlipImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis. % % The format of the FlipImage method is: % % Image *FlipImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is copied to destination row (rows-y-1).
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flip_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
    /*
      Mirror the colormap/CMYK index channel along with the pixels.
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) memcpy(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the virtual canvas offset too, so the page stays consistent.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Walk the destination row right-to-left while reading the source row
      left-to-right; q starts one past the row end, so *--q writes the last
      column first.
    */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the virtual canvas offset too, so the page stays consistent.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns-by-rows block of pixels (and any
  colormap/CMYK indexes) from (sx,sy) in source to (dx,dy) in destination.
  Returns MagickTrue on success; a zero-width region is a successful no-op.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict destination_indexes;

    register PixelPacket
      *magick_restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) memcpy(q,p,(size_t) columns*sizeof(*p));
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) memcpy(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0, columns) and [0, rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the roll is the union of four quadrant copies, each moving
    one rectangular region of the source to its wrapped-around destination.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *ShaveImage(const Image *image, const RectangleInfo *shave_info,ExceptionInfo *exception) { Image *shave_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((2*shave_info->width) >= image->columns) || ((2*shave_info->height) >= image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); SetGeometry(image,&geometry); geometry.width-=2*shave_info->width; geometry.height-=2*shave_info->height; geometry.x=(ssize_t) shave_info->width+image->page.x; geometry.y=(ssize_t) shave_info->height+image->page.y; shave_image=CropImage(image,&geometry,exception); if (shave_image == (Image *) NULL) return((Image *) NULL); shave_image->page.width-=2*shave_info->width; shave_image->page.height-=2*shave_info->height; shave_image->page.x-=(ssize_t) shave_info->width; shave_image->page.y-=(ssize_t) shave_info->height; return(shave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImage() splices a solid color into the image as defined by the % geometry. % % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse) { InheritException(exception,&splice_image->exception); splice_image=DestroyImage(splice_image); return((Image *) NULL); } (void) SetImageBackgroundColor(splice_image); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case StaticGravity: case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_geometry.y,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes, *magick_restrict splice_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,1) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes, *magick_restrict splice_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) 
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % */ /* DANGER: This function destroys what it assumes to be a single image list. If the input image is part of a larger list, all other images in that list will be simply 'lost', not destroyed. Also if the crop generates a list of images only the first image is resized. 
And finally if the crop succeeds and the resize failed, you will get a cropped image, as well as a 'false' or 'failed' report. This function and should probably be deprecated in favor of direct calls to CropImageToTiles() or ResizeImage(), as appropriate. */ MagickExport MagickBooleanType TransformImage(Image **image, const char *crop_geometry,const char *image_geometry) { Image *resize_image, *transform_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); transform_image=(*image); if (crop_geometry != (const char *) NULL) { Image *crop_image; /* Crop image to a user specified size. */ crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception); if (crop_image == (Image *) NULL) transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception); else { transform_image=DestroyImage(transform_image); transform_image=GetFirstImageInList(crop_image); } *image=transform_image; } if (image_geometry == (const char *) NULL) return(MagickTrue); /* Scale image to a user specified size. 
*/ flags=ParseRegionGeometry(transform_image,image_geometry,&geometry, &(*image)->exception); (void) flags; if ((transform_image->columns == geometry.width) && (transform_image->rows == geometry.height)) return(MagickTrue); resize_image=ResizeImage(transform_image,geometry.width,geometry.height, transform_image->filter,transform_image->blur,&(*image)->exception); if (resize_image == (Image *) NULL) return(MagickFalse); transform_image=DestroyImage(transform_image); transform_image=resize_image; *image=transform_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImages() calls TransformImage() on each image of a sequence. % % The format of the TransformImage method is: % % MagickBooleanType TransformImages(Image **image, % const char *crop_geometry,const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. 
% */ MagickExport MagickBooleanType TransformImages(Image **images, const char *crop_geometry,const char *image_geometry) { Image *image, **image_list, *transform_images; MagickStatusType status; register ssize_t i; assert(images != (Image **) NULL); assert((*images)->signature == MagickCoreSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); image_list=ImageListToArray(*images,&(*images)->exception); if (image_list == (Image **) NULL) return(MagickFalse); status=MagickTrue; transform_images=NewImageList(); for (i=0; image_list[i] != (Image *) NULL; i++) { image=image_list[i]; status&=TransformImage(&image,crop_geometry,image_geometry); AppendImageToList(&transform_images,image); } *images=transform_images; image_list=(Image **) RelinquishMagickMemory(image_list); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p o s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransposeImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis while rotating them by 90 degrees. % % The format of the TransposeImage method is: % % Image *TransposeImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image: each source row becomes a destination column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict transpose_indexes,
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    /*
      Read source row (rows-y-1); write it as a 1-pixel-wide, full-height
      destination region (a column) at x = rows-y-1.  The memcpy works
      because the authentic region's nexus buffer is contiguous.
    */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
    /*
      NOTE(review): image_view is a *virtual* view; sibling methods (e.g.
      FlipImage) use GetCacheViewVirtualIndexQueue() on such views — confirm
      the Authentic variant is intended here.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
        if (transpose_indexes != (IndexPacket *) NULL)
          (void) memcpy(transpose_indexes,indexes,(size_t)
            image->columns*sizeof(*transpose_indexes));
      }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransposeImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /*
    Swap the virtual canvas dimensions/offsets to match the rotation.
  */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The destination canvas is the source with rows/columns swapped.
  */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image: each source row is written, reversed, into one
    destination column.  Rows are independent, so the loop is parallelized
    with one row per iteration.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict transverse_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* another iteration already failed; skip the remaining work */
    if (status == MagickFalse)
      continue;
    /*
      Read source row y; queue destination column x=(rows-y-1), height
      transverse_image->rows (== image->columns).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
      1),0,1,transverse_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Copy the row in reverse order: q starts one past the end of the queued
      column and is pre-decremented.
    */
    q+=image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
      *--q=(*p++);
    /*
      Mirror the index channel (colormap/black channel), when present.
      NOTE(review): image_view is a *virtual* view but the authentic index
      queue is requested here -- confirm against the cache-view contract.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
        if (transverse_indexes != (IndexPacket *) NULL)
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(transverse_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
      }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Swap the page geometry and reflect the page offsets to match the rotated
    canvas.
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *blank_image;

  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Usual case: crop to the bounding box, translated by the page origin.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate bounding box (nothing to keep): return a 1x1 transparent
    image carrying the original page with a (-1,-1) offset.
  */
  blank_image=CloneImage(image,1,1,MagickTrue,exception);
  if (blank_image == (Image *) NULL)
    return((Image *) NULL);
  blank_image->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(blank_image);
  blank_image->page=image->page;
  blank_image->page.x=(-1);
  blank_image->page.y=(-1);
  return(blank_image);
}
CPULauncher.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #pragma once #include <vector> #include "open3d/core/AdvancedIndexing.h" #include "open3d/core/Indexer.h" #include "open3d/core/Tensor.h" #include "open3d/core/kernel/ParallelUtil.h" #include "open3d/utility/Console.h" namespace open3d { namespace core { namespace kernel { class CPULauncher { public: /// Fills tensor[:][i] with element_kernel(i). /// /// \param indexer The input tensor and output tensor to the indexer are the /// same (as a hack), since the tensor are filled in-place. 
/// \param element_kernel A function that takes pointer location and /// workload_idx, computes the value to fill, and fills the value at the /// pointer location. template <typename func_t> static void LaunchIndexFillKernel(const Indexer& indexer, func_t element_kernel) { #pragma omp parallel for schedule(static) for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), workload_idx); } } template <typename func_t> static void LaunchUnaryEWKernel(const Indexer& indexer, func_t element_kernel) { #pragma omp parallel for schedule(static) for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), indexer.GetOutputPtr(workload_idx)); } } template <typename func_t> static void LaunchBinaryEWKernel(const Indexer& indexer, func_t element_kernel) { #pragma omp parallel for schedule(static) for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), indexer.GetInputPtr(1, workload_idx), indexer.GetOutputPtr(workload_idx)); } } template <typename func_t> static void LaunchAdvancedIndexerKernel(const AdvancedIndexer& indexer, func_t element_kernel) { #pragma omp parallel for schedule(static) for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(workload_idx), indexer.GetOutputPtr(workload_idx)); } } template <typename scalar_t, typename func_t> static void LaunchReductionKernelSerial(const Indexer& indexer, func_t element_kernel) { for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), indexer.GetOutputPtr(workload_idx)); } } /// Create num_threads workers to compute partial reductions and then reduce /// to the final results. This only applies to reduction op with one output. 
template <typename scalar_t, typename func_t> static void LaunchReductionKernelTwoPass(const Indexer& indexer, func_t element_kernel, scalar_t identity) { if (indexer.NumOutputElements() > 1) { utility::LogError( "Internal error: two-pass reduction only works for " "single-output reduction ops."); } int64_t num_workloads = indexer.NumWorkloads(); int64_t num_threads = GetMaxThreads(); int64_t workload_per_thread = (num_workloads + num_threads - 1) / num_threads; std::vector<scalar_t> thread_results(num_threads, identity); #pragma omp parallel for schedule(static) for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) { int64_t start = thread_idx * workload_per_thread; int64_t end = std::min(start + workload_per_thread, num_workloads); for (int64_t workload_idx = start; workload_idx < end; ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), &thread_results[thread_idx]); } } void* output_ptr = indexer.GetOutputPtr(0); for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) { element_kernel(&thread_results[thread_idx], output_ptr); } } template <typename scalar_t, typename func_t> static void LaunchReductionParallelDim(const Indexer& indexer, func_t element_kernel) { // Prefers outer dimension >= num_threads. const int64_t* indexer_shape = indexer.GetMasterShape(); const int64_t num_dims = indexer.NumDims(); int64_t num_threads = GetMaxThreads(); // Init best_dim as the outer-most non-reduction dim. 
int64_t best_dim = num_dims - 1; while (best_dim >= 0 && indexer.IsReductionDim(best_dim)) { best_dim--; } for (int64_t dim = best_dim; dim >= 0 && !indexer.IsReductionDim(dim); --dim) { if (indexer_shape[dim] >= num_threads) { best_dim = dim; break; } else if (indexer_shape[dim] > indexer_shape[best_dim]) { best_dim = dim; } } if (best_dim == -1) { utility::LogError( "Internal error: all dims are reduction dims, use " "LaunchReductionKernelTwoPass instead."); } #pragma omp parallel for schedule(static) for (int64_t i = 0; i < indexer_shape[best_dim]; ++i) { Indexer sub_indexer(indexer); sub_indexer.ShrinkDim(best_dim, i, 1); LaunchReductionKernelSerial<scalar_t>(sub_indexer, element_kernel); } } /// General kernels with non-conventional indexers template <typename func_t> static void LaunchGeneralKernel(int64_t n, func_t element_kernel) { #pragma omp parallel for schedule(static) for (int64_t workload_idx = 0; workload_idx < n; ++workload_idx) { element_kernel(workload_idx); } } }; } // namespace kernel } // namespace core } // namespace open3d