source
stringlengths
3
92
c
stringlengths
26
2.25M
ten_tusscher_2004_epi_S3_5.c
//Original Ten Tusscher
//
// ten Tusscher et al. (2004) human ventricular myocyte model, epicardial
// variant, with a tuned conductance set ("S3_5").  The model carries NEQ
// state variables: membrane voltage sv[0], twelve gating variables
// sv[1..12], and four ionic concentrations sv[13..16].  The macros
// GET_CELL_MODEL_DATA, SET_ODE_INITIAL_CONDITIONS_CPU and
// SOLVE_MODEL_ODES_CPU expand to the solver-facing function signatures;
// they are declared in the included header (not visible in this file).
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_5.h"

// Publish the model metadata (resting potential, number of ODEs) to the
// caller-supplied cell_model struct; each field is filled only when the
// corresponding get_* flag is set.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Load the state vector sv with precomputed steady-state initial conditions.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions (published values, kept for reference):
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.f;       // M
    sv[2] = 0.75;      // H
    sv[3] = 0.75f;     // J
    sv[4] = 0.f;       // Xr1
    sv[5] = 1.f;       // Xr2
    sv[6] = 0.f;       // Xs
    sv[7] = 1.f;       // S
    sv[8] = 0.f;       // R
    sv[9] = 0.f;       // D
    sv[10] = 1.f;      // F
    sv[11] = 1.f;      // FCa
    sv[12] = 1.f;      // G
    sv[13] = 0.0002;   // Cai
    sv[14] = 0.2f;     // CaSR
    sv[15] = 11.6f;    // Nai
    sv[16] = 138.3f;   // Ki
    */

    // Elnaz's steady-state initial conditions (same ordering as above)
    real sv_sst[]={-86.5624423262106,0.00129167275470664,0.779566791615456,0.779410913961902,0.000174854203482290,0.485029627881613,0.00294151853152467,0.999998346761065,1.93536430763538e-08,1.89253322511936e-05,0.999770718781989,1.00705290160006,0.999994726153831,4.61702239557613e-05,0.428543055333246,10.5677098069322,139.088209861540};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance every requested cell by num_steps sub-steps of size dt.
// cells_to_solve (when non-NULL) maps loop index i to the cell's slot in the
// packed state array sv; stim_currents is indexed by i, not by sv_id.
// Cells are independent, so the outer loop is parallelized with OpenMP.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One dt step for a single cell.  Note: RHS_cpu returns the NEW state values
// in rDY (not time derivatives), so they are copied straight back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Core of the model.  Despite the conventional "RHS" name this routine does
// the time stepping itself: gates are advanced with Rush-Larsen exponential
// integration (X_INF-(X_INF-x)*exp(-dt/TAU_X)), voltage and the Na+/K+
// concentrations with forward Euler, and the buffered Ca2+ concentrations by
// solving the buffering quadratic analytically.  rDY_ receives next-step
// state values in the same layout as sv.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane voltage (mV)
    real sm    = sv[1];   // INa activation m
    real sh    = sv[2];   // INa fast inactivation h
    real sj    = sv[3];   // INa slow inactivation j
    real sxr1  = sv[4];   // IKr activation xr1
    real sxr2  = sv[5];   // IKr inactivation xr2
    real sxs   = sv[6];   // IKs activation xs
    real ss    = sv[7];   // Ito inactivation s
    real sr    = sv[8];   // Ito activation r
    real sd    = sv[9];   // ICaL activation d
    real sf    = sv[10];  // ICaL voltage inactivation f
    real sfca  = sv[11];  // ICaL calcium inactivation fCa
    real sg    = sv[12];  // calcium-release gate g
    real Cai   = sv[13];  // cytosolic [Ca2+]
    real CaSR  = sv[14];  // SR [Ca2+]
    real Nai   = sv[15];  // intracellular [Na+]
    real Ki    = sv[16];  // intracellular [K+]

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;     // gas constant
    const real F = 96485.3415f;   // Faraday constant
    const real T =310.0f;         // temperature (K)
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    //#ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Tuned parameter set ("S3_5"); overrides the published conductances
    // declared above.  Layout: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1,
    // GpK, knak, knaca, Vmaxup, GpCa, arel, crel, Vleak.
    real parameters []={14.4147498667915,0.000251124617142363,0.000137013485769181,0.000169470481210674,0.253895228590867,0.152733305273802,0.167845467014990,4.51645656101753,0.0160856241829014,1.32915393527547,1099.63821888814,0.000521101954310938,0.130615895825142,0.0198817586800201,0.00476047046076979,6.04955465909554e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: membrane currents, fluxes, reversal potentials,
    // rate coefficients, steady states and time constants.
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel;
    real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA; real Caisquare; real CaSRsquare; real CaCurrent;
    real CaSRCurrent; real fcaold; real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM;
    real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF;
    real TAU_F; real F_INF; real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    // Precomputed Rush-Larsen factors for the fCa and g gates (fixed taus).
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;  // total transmembrane current

    //Needed to compute currents: Nernst reversal potentials ...
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    // ... and voltage-dependent rectification factors.
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release (CICR); arel/crel come from the tuned parameter set.
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    // SR leak; Vleak comes from the tuned parameter set.
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // New CaSR: solve the calsequestrin-buffering quadratic analytically.
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // New Cai: same analytic treatment for the cytosolic buffer.
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    // Na+ and K+ advance by plain forward Euler.
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h gate: rate formulation switches at -40 mV.
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    // j gate: same -40 mV switch.
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // NOTE(review): R_INF/S_INF/TAU_R/TAU_S are only assigned when one of
    // EPI/ENDO/MCELL is defined at compile time; if none is, rDY_[7]/rDY_[8]
    // below read uninitialized values.  Presumably the build defines EPI
    // (file is the *_epi variant) — confirm in the build flags.
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g may only move toward their steady state while the membrane
    // is depolarized above -37 mV; otherwise the old value is kept.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
ipv4_crypt.c
// Format-preserving IPv4 encryption
// Maps each IPv4 address onto a different IPv4 address via a 64-bit key.
//
//   int ipv4_encrypt(char *ip, const void *key);
//   int ipv4_decrypt(char *ip, const void *key);
//
// This is free and unencumbered software released into the public domain.

// Parse a quad-dotted IPv4 address string into its 32-bit numeric value.
// Exactly four dot-separated decimal octets are required; each octet must
// have at least one digit and a value no greater than 255 (extra leading
// zeros are tolerated).  Returns the numeric address, or -1 on any
// malformed input (missing octet, bad character, trailing garbage).
static long long ipv4_decode(const char *s)
{
    unsigned long addr = 0;
    const char *p = s;

    for (int octet = 0; octet < 4; octet++) {
        // Octets after the first must be preceded by a single dot.
        if (octet > 0) {
            if (*p != '.') {
                return -1;
            }
            p++;
        }

        // At least one digit is mandatory.
        if (*p < '0' || *p > '9') {
            return -1;
        }

        unsigned long value = 0;
        while (*p >= '0' && *p <= '9') {
            value = value*10 + (unsigned long)(*p - '0');
            if (value > 255) {
                return -1;  // octet out of range
            }
            p++;
        }

        addr = addr<<8 | value;
    }

    // Anything after the fourth octet (including a fifth dot) is invalid.
    if (*p != 0) {
        return -1;
    }
    return (long long)addr;
}

// Render a numerical IPv4 address as a quad-dotted string, most significant
// octet first, with no leading zeros.  The destination buffer must hold at
// least 16 bytes ("255.255.255.255" plus the terminating NUL).
static void ipv4_encode(char *s, unsigned long ip)
{
    char *out = s;

    for (int shift = 24; shift >= 0; shift -= 8) {
        unsigned v = (unsigned)(ip>>shift) & 0xff;

        // Emit only the digits the value actually needs.
        if (v >= 100) {
            *out++ = (char)('0' + v/100);
        }
        if (v >= 10) {
            *out++ = (char)('0' + v/10%10);
        }
        *out++ = (char)('0' + v%10);

        // Dot between octets, NUL after the last one.
        *out++ = shift ? '.' : 0;
    }
}

// Encrypt a quad-dotted IPv4 address in place using format-preserving
// encryption. The key size is 8 bytes, and the buffer must have room for
// at least 16 bytes. Returns 1 on success or 0 if the input was invalid.
// Encrypt a quad-dotted IPv4 address in place: decode to a 32-bit integer,
// run it through a keyed bijective mixing permutation (add-key / xorshift /
// multiply rounds, all mod 2^32), and re-encode.  The key is 8 bytes read
// little-endian as two 32-bit halves.  The buffer must have room for at
// least 16 bytes.  Returns 1 on success or 0 if the input was invalid.
// Note: unsigned long may be wider than 32 bits, hence the explicit
// 0xffffffff masks to keep the arithmetic mod 2^32.
int ipv4_encrypt(char *s, const void *key)
{
    long long r = ipv4_decode(s);
    if (r < 0) {
        return 0;
    }

    // Split the 64-bit key into two little-endian 32-bit subkeys.
    const unsigned char *p = key;
    unsigned long k0 = (unsigned long)p[0] <<  0 |
                       (unsigned long)p[1] <<  8 |
                       (unsigned long)p[2] << 16 |
                       (unsigned long)p[3] << 24;
    unsigned long k1 = (unsigned long)p[4] <<  0 |
                       (unsigned long)p[5] <<  8 |
                       (unsigned long)p[6] << 16 |
                       (unsigned long)p[7] << 24;

    // Invertible mixing rounds; each step is undone in reverse order by
    // ipv4_decrypt (verified exhaustively by the TEST harness below).
    unsigned long ip = r;
    ip += k0;
    ip &= 0xffffffffU;
    ip ^= ip >> 17;
    ip *= 0x9e485565U;
    ip += k1;
    ip &= 0xffffffffU;
    ip ^= ip >> 16;
    ip *= 0xef1d6b47U;
    ip &= 0xffffffffU;
    ip ^= ip >> 16;
    ipv4_encode(s, ip ^ k0 ^ k1);
    return 1;
}

// Decrypt a quad-dotted IPv4 address in place: exact inverse of
// ipv4_encrypt, applying the inverse of each mixing step in reverse order.
// The multipliers are presumably the mod-2^32 multiplicative inverses of
// the encrypt constants (0xeb00ce77 pairs with 0xef1d6b47, 0x88ccd46d with
// 0x9e485565) — the exhaustive TEST loop below confirms the round trip.
// The key size is 8 bytes, and the buffer must have room for at least
// 16 bytes.  Returns 1 on success or 0 if the input was invalid.
int ipv4_decrypt(char *s, const void *key)
{
    long long r = ipv4_decode(s);
    if (r < 0) {
        return 0;
    }

    // Same little-endian subkey split as in ipv4_encrypt.
    const unsigned char *p = key;
    unsigned long k0 = (unsigned long)p[0] <<  0 |
                       (unsigned long)p[1] <<  8 |
                       (unsigned long)p[2] << 16 |
                       (unsigned long)p[3] << 24;
    unsigned long k1 = (unsigned long)p[4] <<  0 |
                       (unsigned long)p[5] <<  8 |
                       (unsigned long)p[6] << 16 |
                       (unsigned long)p[7] << 24;

    // Undo the final key whitening, then each encrypt round in reverse.
    // A xorshift by >= 16 bits is its own inverse on 32-bit values.
    unsigned long ip = r ^ k0 ^ k1;
    ip ^= ip >> 16;
    ip *= 0xeb00ce77U;
    ip &= 0xffffffffU;
    ip ^= ip >> 16;
    ip -= k1;
    ip *= 0x88ccd46dU;
    ip &= 0xffffffffU;
    ip ^= ip >> 17;
    ip -= k0;
    ipv4_encode(s, ip & 0xffffffffU);
    return 1;
}

#ifdef TEST
// Usage:
//   $ cc -DTEST -O3 -fopenmp -o ipv4_crypt ipv4_crypt.c
//   $ printf '%s\n' 127.0.0.1 10.0.0.1 | ./ipv4_crypt
#include <stdio.h>
#include <string.h>

// Filter mode: encrypt each address read from stdin, print
// "<ciphertext>\t<roundtrip>" per line, then exhaustively verify
// encode/decode and encrypt/decrypt over the whole 2^32 address space.
int main(void)
{
    char buf[32];
    unsigned char key[8] = {0xab, 0xfc, 0x0d, 0x86, 0xea, 0x47, 0x56, 0xc5};

    while (fgets(buf, sizeof(buf), stdin)) {
        char *e = strchr(buf, '\n');
        if (e) *e = 0;
        int r = ipv4_encrypt(buf, key);
        if (!r) {
            puts("INVALID");
            continue;
        }
        printf("%s\t", buf);
        ipv4_decrypt(buf, key);
        puts(buf);
    }

    /* Test encode/decode */
    #pragma omp parallel for
    for (long long ip = 0; ip < 1LL<<32; ip++) {
        char want[16], got[16];
        sprintf(want, "%d.%d.%d.%d",
                (int)(ip >> 24 & 0xff), (int)(ip >> 16 & 0xff),
                (int)(ip >>  8 & 0xff), (int)(ip >>  0 & 0xff));
        ipv4_encode(got, ip);
        if (strcmp(want, got)) {
            printf("FAIL: (encode) %08llx, %s != %s\n", ip, want, got);
        }
        long long r = ipv4_decode(want);
        ipv4_encode(got, r);
        if (r != ip) {
            printf("FAIL: (decode) %08llx, %s != %s\n", ip, want, got);
        }
    }

    /* Test encrypt/decrypt */
    #pragma omp parallel for
    for (long long ip = 0; ip < 1LL<<32; ip++) {
        char want[16], got[16];
        ipv4_encode(want, ip);
        ipv4_encode(got, ip);
        ipv4_encrypt(got, key);
        ipv4_decrypt(got, key);
        if (strcmp(want, got)) {
            printf("FAIL: (encrypt) %08llx, %s != %s\n", ip, want, got);
        }
    }
}
#endif
ci_weights.h
/* The MIT License (MIT) * * (c) Jürgen Simon 2014 (juergen.simon@uni-bonn.de) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef M3D_OASE_CI_WEIGHTFUNCTION_H #define M3D_OASE_CI_WEIGHTFUNCTION_H #include <meanie3D/defines.h> #include <meanie3D/namespaces.h> #include <meanie3D/array.h> #include <meanie3D/clustering/cluster_op.h> #include <meanie3D/clustering/cluster_list.h> #include <meanie3D/filters/scalespace_filter.h> #include <meanie3D/filters.h> #include <meanie3D/tracking.h> #include <meanie3D/utils/time_utils.h> #include <netcdf> #include <vector> #include <map> #include "weight_function.h" namespace m3D { // // Constants // // Variables used for scoring scheme static const size_t CI_WEIGHT_NUM_VARS = 6; static const char *CI_WEIGHT_VARS[] = { "msevi_l15_ir_108", "msevi_l15_wv_062", "msevi_l15_ir_134", "cband_radolan_rx", "linet_oase_tl", "msevi_l15_hrv" }; // Shorthands used to access variables in order static const int msevi_l15_ir_108 = 0; static const int msevi_l15_wv_062 = 1; static const int msevi_l15_ir_134 = 2; static const int cband_radolan_rx = 3; static const int linet_oase_tl = 4; static const int msevi_l15_hrv = 5; // Variables used for protoclusters static const size_t PROTOCLUSTER_NUM_VARS = 1; static const char *PROTOCLUSTER_VARS[] = { "msevi_l15_ir_108" }; /** This class represents a weight function loosely based on the CI score * by Walker, MacKenzie Mecicalski, Jewett (2012). Only the static score * criteria are used (no time differences). * * It adds score by looking for radar and lighting signatures in a 5km * radius around a given point. 
* */ template<class T> class OASECIWeightFunction : public WeightFunction<T> { private: // // Members // detection_params_t <T> m_super_params; detection_context_t <T> m_super_context; detection_params_t <T> m_params; detection_context_t <T> m_ctx; NetCDFDataStore <T> *m_data_store; NetCDFDataStore <T> *m_ci_comparison_data_store; // const std::string *m_ci_comparison_file; // const CoordinateSystem<T> *m_coordinate_system; MultiArray <T> *m_weight; MultiArray<bool> *m_overlap; std::vector<std::string> m_variable_names; // bool m_satellite_only; // bool m_use_walker_mecikalski_limits; std::vector<std::string> m_protocluster_variables; ClusterList <T> *m_previous_protoclusters; // T m_protocluster_scale; // int m_protocluster_min_size; MultiArray<bool> *m_prev_cluster_area; MultiArray<bool> *m_curr_cluster_area; // Attributes for calculating brightness // temperature from the spectral radiances map <size_t, T> m_c1; map <size_t, T> m_c2; map <size_t, T> m_alpha; map <size_t, T> m_beta; map <size_t, T> m_wavenumber; // Members for range based weight calculations PointIndex <T> *m_index; // index for range search vector <T> m_bandwidth; // search radius for numerous operations SearchParameters *m_search_params; // search params for search #if DEBUG_CI_SCORE MultiArray<T> *m_score_108; MultiArray<T> *m_score_108_trend; MultiArray<T> *m_score_62_108; MultiArray<T> *m_score_134_108; MultiArray<T> *m_62_108_trend; MultiArray<T> *m_134_108_trend; #endif public: /** * * @param params * @param ctx */ OASECIWeightFunction(const detection_params_t <T> &params, const detection_context_t <T> &ctx) : m_super_params(params), m_super_context(ctx), m_data_store(NULL), m_weight(new MultiArrayBlitz<T>(ctx.coord_system->get_dimension_sizes(), 0.0)) { using namespace utils; try { NcFile file(params.filename.c_str(), NcFile::read); for (size_t i = 0; i < CI_WEIGHT_NUM_VARS; i++) { m_variable_names.push_back(std::string(CI_WEIGHT_VARS[i])); NcVar var = file.getVar(CI_WEIGHT_VARS[i]); if 
(var.isNull()) { cerr << "FATAL: file requires variable " << CI_WEIGHT_VARS[i] << " for CI interest weight" << endl; exit(EXIT_FAILURE); } // Obtain the constants for transforming radiances // into brightness temperatures fromt he attributes: namespace nu = m3D::utils::netcdf; switch (i) { case msevi_l15_ir_108: case msevi_l15_wv_062: case msevi_l15_ir_134: { m_c1[i] = nu::get_attribute_value<T>(var, "rad_const1"); m_c2[i] = nu::get_attribute_value<T>(var, "rad_const2"); m_alpha[i] = nu::get_attribute_value<T>(var, "alpha"); m_beta[i] = nu::get_attribute_value<T>(var, "beta"); m_wavenumber[i] = nu::get_attribute_value<T>(var, "wavenum"); } } } } catch (netCDF::exceptions::NcException &e) { cerr << "FATAL: can not read from netcdf file " << params.filename << endl; exit(EXIT_FAILURE); } // Create the data store this->m_data_store = new NetCDFDataStore<T>(params.filename, m_variable_names, params.dimensions, params.dimension_variables, params.time_index); // index for effective range search ops m_bandwidth = ctx.fs->spatial_component(ctx.bandwidth); m_index = PointIndex<T>::create(&ctx.fs->points, ctx.coord_system->rank()); m_search_params = new RangeSearchParams<T>(m_bandwidth); this->obtain_protoclusters(); if (params.ci_comparison_file != NULL) { //#if WITH_OPENCV // Section kept for historic interest // Attempt (b) : estimate dense motion vector field using opencv // and shift values along the field //namespace ov = m3D::utils::opencv; //m_ci_comparison_data_store = ov::shifted_store_from_flow_of_variable(filename, *ci_comparison_file, // fs->coordinate_system, // this->m_variable_names, // msevi_l15_hrv, 7.0); //#endif // TODO: there is a problem here. The time index should // be configurable when the comparison file is the same // but has time dimension. 
m_ci_comparison_data_store = new NetCDFDataStore<T>(params.filename, m_variable_names, params.dimensions, params.dimension_variables, params.time_index); if (params.ci_comparison_protocluster_file != NULL) { // Load previous proto-clusters m_previous_protoclusters = ClusterList<T>::read(*params.ci_comparison_protocluster_file); // Shift previous data by tracking protoclusters and use // the resulting tracking vectors / clusters cout << endl << "Shifting comparison data ..."; start_timer(); this->shift_comparison_data_using_protoclusters(params.ci_comparison_protocluster_file); cout << " done (" << stop_timer() << "s)" << endl; // Reduce the data by calculating the cluster overlap area // (used later in weight function calculation) cout << endl << "Calculating overlap ..."; start_timer(); this->calculate_overlap(); cout << " done (" << stop_timer() << "s)" << endl; // Replace all pixels with the with average of 25% coldest pixels // in their vicinity (10km radius). cout << endl << "Replacing with average of 25% coldest pixels (comparison data) ..."; start_timer(); // NOTE: using fs here is not entirely correct, because // the featurespace was constructed from the wrong // datastore. 
This 'should' not be a problem, because // of the overlap calculation this->replace_with_coldest_pixels(m_ci_comparison_data_store, ctx.fs); cout << " done (" << stop_timer() << "s)" << endl; } } cout << endl << "Replacing with average of 25% coldest pixels (current data) ..."; start_timer(); this->replace_with_coldest_pixels(m_data_store, ctx.fs); cout << " done (" << stop_timer() << "s)" << endl; cout << endl << "Calculating final weight score ..."; start_timer(); calculate_weight_function(ctx.fs); cout << " done (" << stop_timer() << "s)" << endl; } ~OASECIWeightFunction() { if (this->m_data_store != NULL) { delete this->m_data_store; this->m_data_store = NULL; } if (this->m_index != NULL) { delete this->m_index; this->m_index = NULL; } if (this->m_search_params != NULL) { delete this->m_search_params; this->m_search_params = NULL; } if (this->m_ci_comparison_data_store != NULL) { delete this->m_ci_comparison_data_store; this->m_ci_comparison_data_store = NULL; } if (this->m_previous_protoclusters != NULL) { delete this->m_previous_protoclusters; this->m_previous_protoclusters = NULL; } if (this->m_overlap != NULL) { delete this->m_overlap; this->m_overlap = NULL; } if (this->m_prev_cluster_area != NULL) { delete this->m_prev_cluster_area; this->m_prev_cluster_area = NULL; } if (this->m_curr_cluster_area != NULL) { delete this->m_curr_cluster_area; this->m_curr_cluster_area = NULL; } // clean up clustering mess right away to free memory Detection<T>::cleanup(m_params, m_ctx); } private: void obtain_protoclusters() { cout << endl << endl; cout << "+ ---------------------------- +" << endl; cout << "+ Obtaining protoclusters +" << endl; cout << "+ ---------------------------- +" << endl; m_params = Detection<T>::defaultParams(); // Dimensions stay the same m_params.dimensions = m_super_params.dimensions; m_params.dimension_variables = m_super_params.dimension_variables; // Add variables try { for (size_t i = 0; i < PROTOCLUSTER_NUM_VARS; i++) { std::string name = 
std::string(PROTOCLUSTER_VARS[i]); m_params.variables.push_back(name); } } catch (netCDF::exceptions::NcException &e) { cerr << "FATAL: can not read from netcdf file " << m_ci_comparison_data_store->filename() << endl; exit(EXIT_FAILURE); } // cut msevi_l15_ir_108 at max 0 centigrade m_params.upper_thresholds[0] = spectral_radiance(msevi_l15_ir_108, 0); // Set other parameters m_params.filename = m_super_params.filename; m_params.scale = m_super_params.ci_protocluster_scale; m_params.min_cluster_size = m_super_params.ci_protocluster_min_size; m_params.verbosity = m_super_params.verbosity; Detection<T>::run(m_params, m_ctx); // Write protoclusters out boost::filesystem::path path(m_super_params.filename); std::string fn = "protoclusters-" + path.stem().generic_string<std::string>() + ".nc"; m_ctx.clusters->write(fn); } void shift_comparison_data_using_protoclusters(const std::string *ci_comparison_protocluster_file) { using namespace utils::vectors; if (ci_comparison_protocluster_file != NULL) { vector<size_t> dims = m_super_context.coord_system->get_dimension_sizes(); // Perform a tracking run tracking_param_t params = Tracking<T>::defaultParams(); params.tracking_variable = std::string(PROTOCLUSTER_VARS[0]); // Time difference calculation can be a second or so // off. Allow some slack. 
params.max_deltaT = ::units::values::s(930); Tracking<T> proto_tracker(params); proto_tracker.track(m_previous_protoclusters, m_ctx.clusters); m_ctx.clusters->save(); // Find object pairs and shift the all data from // the comparison scan within that object's area // to the 'forecasted' position using the center // displacement vector // Idea: improve on the result by using OpenCV's // affine transformation finder algorithm and // morph the pixels into position std::vector<std::vector<T> > origins, vectors; // initialize storage for shifted data typedef std::map<size_t, MultiArray<T> *> data_map_t; data_map_t shifted_data; for (size_t var_index = 0; var_index < m_ci_comparison_data_store->rank(); var_index++) { T NOT_SET = m_ci_comparison_data_store->fill_value(var_index); shifted_data[var_index] = new MultiArrayBlitz<T>(dims, NOT_SET); } // ::m3D::utils::opencv::display_variable(m_ci_comparison_data_store,msevi_l15_ir_108); // ::m3D::utils::opencv::display_array(m_ci_comparison_data_store->get_data(msevi_l15_ir_108), // m_ci_comparison_data_store->min(msevi_l15_ir_108), // m_ci_comparison_data_store->max(msevi_l15_ir_108)); // iterate over clusters for (size_t pi = 0; pi < m_previous_protoclusters->size(); pi++) { typename Cluster<T>::ptr pc = m_previous_protoclusters->clusters.at(pi); // find the matched candidate for (size_t ci = 0; ci < m_ctx.clusters->size(); ci++) { typename Cluster<T>::ptr cc = m_ctx.clusters->clusters.at(ci); if (pc->id == cc->id) { // Calculate average displacement typedef std::vector<T> vec_t; vec_t center_p = pc->geometrical_center(); vec_t center_c = cc->geometrical_center(); vec_t displacement = center_c - center_p; origins.push_back(center_p); vectors.push_back(displacement); // Move previous data by displacement vector typename Point<T>::list::iterator point_iter; for (point_iter = pc->get_points().begin(); point_iter != pc->get_points().end(); point_iter++) { typename Point<T>::ptr p = *point_iter; vector<T> x = p->coordinate + 
displacement; vector<int> source_gridpoint = p->gridpoint; vector<int> dest_gridpoint = m_ctx.coord_system->newGridPoint(); try { m_ctx.coord_system->reverse_lookup(x, dest_gridpoint); for (size_t var_index = 0; var_index < m_ci_comparison_data_store->rank(); var_index++) { bool is_in_range = false; bool is_valid = false; T value = m_ci_comparison_data_store->get(var_index, source_gridpoint, is_in_range, is_valid); if (is_valid && is_in_range) { // we are manipulating raw data in the NetCDFDataStore, which // keeps the values in 'packed' format internally. When calling // the get method above, the value is unpacked for convencience. // This means we need to pack the value again before writing it // out or the value range will be messed up. T packed_value = m_ci_comparison_data_store->packed_value(var_index, value); shifted_data[var_index]->set(dest_gridpoint, packed_value); } } } catch (std::out_of_range &e) { } } } } } // ::m3D::utils::opencv::display_array(shifted_data[msevi_l15_ir_108], // m_ci_comparison_data_store->min(msevi_l15_ir_108), // m_ci_comparison_data_store->max(msevi_l15_ir_108)); // replace the original data with the shifted data for (size_t var_index = 0; var_index < m_ci_comparison_data_store->rank(); var_index++) { MultiArray<T> *dest = shifted_data[var_index]; m_ci_comparison_data_store->set_data(var_index, dest); } //::m3D::utils::opencv:: display_variable(m_ci_comparison_data_store,msevi_l15_ir_108); boost::filesystem::path ppath(m_previous_protoclusters->source_file); std::string fn = "shifted-" + ppath.filename().stem().generic_string() + ".nc"; m_ci_comparison_data_store->save_as(fn); #if WITH_VTK fn = "vectors-" + ppath.filename().stem().generic_string() + ".vtk"; VisitUtils<T>::write_vectors_vtk(fn, origins, vectors); #endif } } // calculate overlap mask void calculate_overlap() { vector<size_t> dims = m_ctx.coord_system->get_dimension_sizes(); m_overlap = new MultiArrayBlitz<bool>(dims, false); m_prev_cluster_area = new 
MultiArrayBlitz<bool>(dims, false); m_curr_cluster_area = new MultiArrayBlitz<bool>(dims, false); // Mark area occupied by all protoclusters from previous set for (size_t pi = 0; pi < m_previous_protoclusters->size(); pi++) { typename Cluster<T>::ptr c = m_previous_protoclusters->clusters.at(pi); typename Point<T>::list::iterator point_iter; for (point_iter = c->get_points().begin(); point_iter != c->get_points().end(); point_iter++) { typename Point<T>::ptr p = *point_iter; m_prev_cluster_area->set(p->gridpoint, true); } } // Mark area occupied by all protoclusters from current set for (size_t pi = 0; pi < m_ctx.clusters->size(); pi++) { typename Cluster<T>::ptr c = m_ctx.clusters->clusters.at(pi); typename Point<T>::list::iterator point_iter; for (point_iter = c->get_points().begin(); point_iter != c->get_points().end(); point_iter++) { typename Point<T>::ptr p = *point_iter; m_curr_cluster_area->set(p->gridpoint, true); } } // Collate class OverlapFunctor : public MultiArray<bool>::ForEachFunctor { public: MultiArray<bool> *m_overlap; MultiArray<bool> *m_curr_cluster_area; OverlapFunctor(MultiArray<bool> *overlap, MultiArray<bool> *curr_cluster_area) : m_overlap(overlap), m_curr_cluster_area(curr_cluster_area) { }; // for_each callback functor void operator()(const vector<int> &index, const bool have_previous) { bool have_current = m_curr_cluster_area->get(index); m_overlap->set(index, have_current && have_previous); } }; OverlapFunctor f(m_overlap, m_curr_cluster_area); m_prev_cluster_area->for_each(&f); delete m_prev_cluster_area; m_prev_cluster_area = NULL; delete m_curr_cluster_area; m_prev_cluster_area = NULL; } // replace each data point in the overlap area // with the average of the 25% coldest points // within a radius h around it void replace_with_coldest_pixels(NetCDFDataStore <T> *ds, FeatureSpace <T> *fs) { float percentage = 0.25; // calculate bandwidth in pixels vector<int> bandwidth; vector<T> resolution = ds->coordinate_system()->resolution(); 
for (size_t i = 0; i < m_bandwidth.size(); i++) bandwidth.push_back((int) round(m_bandwidth[i] / resolution[i])); // use linear mapping to parallelize the operation LinearIndexMapping mapping(ds->get_dimension_sizes()); for (size_t var_index = 0; var_index < ds->rank(); var_index++) { // exempt radar and lightning from this if (var_index == cband_radolan_rx || var_index == linet_oase_tl) continue; MultiArray<T> *data = ds->get_data(var_index); MultiArray<T> *result = new MultiArrayBlitz<T>(data->get_dimensions()); result->copy_from(data); #if WITH_OPENMP #pragma omp parallel for schedule(dynamic) #endif for (size_t i = 0; i < mapping.size(); i++) { vector<int> gridpoint = mapping.linear_to_grid(i); vector<T> values; #if WITH_OPENMP #pragma omp critical { #endif data->values_around(gridpoint, bandwidth, values); #if WITH_OPENMP } #endif // sort the data in ascending order std::sort(values.begin(), values.end()); // calculate the number of values that make up // the required percentage int num_values = round(values.size() * percentage); // obtain the average of the last num_values values T sum = 0.0; for (int i = 0; i < num_values; i++) sum += values[i]; T average = sum / ((T) num_values); // replace the value in the result array // with the average result->set(gridpoint, average); } // Note: this frees the pointer to the old // data, we do not have to take care of it ds->set_data(var_index, result); } boost::filesystem::path ppath(ds->filename()); std::string fn = "25perc-" + ppath.filename().stem().generic_string() + ".nc"; ds->save_as(fn); } void calculate_weight_function(FeatureSpace <T> *fs) { #if DEBUG_CI_SCORE vector<size_t> dims = m_coordinate_system->get_dimension_sizes(); m_score_108 = new MultiArrayBlitz<T>(dims, 1000); m_score_108_trend = new MultiArrayBlitz<T>(dims, 1000); m_score_62_108 = new MultiArrayBlitz<T>(dims, 1000); m_score_134_108 = new MultiArrayBlitz<T>(dims, 1000); m_62_108_trend = new MultiArrayBlitz<T>(dims, 1000); m_134_108_trend = new 
        MultiArrayBlitz<T>(dims, 1000);
#endif
        // compute the weights
        for (size_t i = 0; i < fs->points.size(); i++) {
            Point<T> *p = fs->points[i];
            // When no previous cluster set exists (m_overlap == NULL) every
            // point is weighted; otherwise only points in the overlap area.
            bool have_overlap = (m_overlap == NULL || m_overlap->get(p->gridpoint) == true);
            if (have_overlap) {
                T saliency = this->compute_weight(p);
                m_weight->set(p->gridpoint, saliency);
            }
        }

#if DEBUG_CI_SCORE
#if WITH_VTK
        // Dump all intermediate score fields for inspection in VisIt.
        boost::filesystem::path ppath(m_data_store->filename());
        std::string basename = ppath.filename().stem().generic_string();
        std::string fn = "ci-score-" + basename + ".vtk";
        VisitUtils<T>::write_multiarray_vtk(fn, "ci-score", m_coordinate_system, m_weight);
        fn = "score_108-" + basename + ".vtk";
        VisitUtils<T>::write_multiarray_vtk(fn, "10.8 ", m_coordinate_system, m_score_108);
        fn = "score_6.2-10.8-" + basename + ".vtk";
        VisitUtils<T>::write_multiarray_vtk(fn, "6.2-10.8", m_coordinate_system, m_score_62_108);
        fn = "score_13.4-10.8-" + basename + ".vtk";
        VisitUtils<T>::write_multiarray_vtk(fn, "13.4-10.8", m_coordinate_system, m_score_134_108);
        fn = "score_10.8-trend-" + basename + ".vtk";
        VisitUtils<T>::write_multiarray_vtk(fn, "10.8-trend", m_coordinate_system, m_score_108_trend);
        fn = "score_6.2-10.8-trend-" + basename + ".vtk";
        VisitUtils<T>::write_multiarray_vtk(fn, "6.2-10.8-trend", m_coordinate_system, m_62_108_trend);
        fn = "score_13.4-10.8-trend-" + basename + ".vtk";
        VisitUtils<T>::write_multiarray_vtk(fn, "13.4-10.8-trend", m_coordinate_system, m_134_108_trend);
        if (m_overlap != NULL) {
            fn = "overlap-" + basename + ".vtk";
            VisitUtils<T>::write_multiarray_vtk(fn, "overlap", m_coordinate_system, m_overlap);
        }
#endif
        delete m_score_108;
        delete m_score_108_trend;
        delete m_score_62_108;
        delete m_score_134_108;
        delete m_62_108_trend;
        delete m_134_108_trend;
#endif
        // dispose of stuff we no longer need
        delete this->m_data_store;
        this->m_data_store = NULL;
        delete this->m_index;
        this->m_index = NULL;
        delete this->m_search_params;
        this->m_search_params = NULL;
    };

public:

    /** Calculates the brightness temperature in
degree centigrade
     * from the given seviri count
     * @param one of msevi_l15_ir_108, msevi_l15_wv_062, msevi_l15_ir_134
     * @param radiance value for the given channel
     * @return brightness temperature in [C]
     */
    T brightness_temperature(const size_t var_index, const T &radiance)
    {
        // Analytic radiance -> brightness-temperature conversion using the
        // per-channel coefficients c1, c2, wavenumber, alpha, beta
        // (presumably the EUMETSAT SEVIRI calibration constants -- confirm
        // against where m_c1/m_c2/m_wavenumber are initialised).
        T wavenum = m_wavenumber[var_index];
        T Tbb = m_c2[var_index] * wavenum
                / log(1 + wavenum * wavenum * wavenum * m_c1[var_index] / radiance);
        T Tb = (Tbb - m_beta[var_index]) / m_alpha[var_index];
        // Kelvin -> degree centigrade
        return Tb - 273.15;
    }

    /** Inverse calculation. Spectral radiance from temperature
     * in degree centigrade
     * @param one of msevi_l15_ir_108, msevi_l15_wv_062, msevi_l15_ir_134
     * @param brightness temperature in [C]
     * @return radiance value for the given channel
     */
    T spectral_radiance(const size_t var_index, const T &temperature)
    {
        T wavenum = m_wavenumber[var_index];
        T Tbb = (temperature + 273.15) * m_alpha[var_index] + m_beta[var_index];
        return wavenum * wavenum * wavenum * m_c1[var_index]
               / (exp(m_c2[var_index] * wavenum / Tbb) - 1);
    }

private:

    /** Calculates the weight at the given point using
     * the scoring scheme.
     */
    T compute_weight(Point <T> *p)
    {
        // Silke's suggestion: when radar is present, use max score to
        // make sure objects are tracked.
        // Max score is 8 when a comparison (previous timestep) file exists
        // (3 trend tests become available), otherwise 6.
        T max_score = (m_super_params.ci_comparison_file != NULL) ?
        8 : 6;

        // If only satellite data is used, subtract lightning
        // and radar from max score
        if (m_super_params.ci_satellite_only) max_score -= 2;

        vector<int> g = p->gridpoint;

        bool isInRange = false;
        bool isValid = false;

        // TODO: validity checks
        T ir_108_radiance = this->m_data_store->get(msevi_l15_ir_108, g, isInRange, isValid);
        T ir_108_temp = brightness_temperature(msevi_l15_ir_108, ir_108_radiance);
#if DEBUG_CI_SCORE
        m_score_108->set(g, ir_108_temp);
#endif
        T wv_062_rad = this->m_data_store->get(msevi_l15_wv_062, g, isInRange, isValid);
        T wv_062_temp = brightness_temperature(msevi_l15_wv_062, wv_062_rad);

        T ir_134_rad = this->m_data_store->get(msevi_l15_ir_134, g, isInRange, isValid);
        T ir_134_temp = brightness_temperature(msevi_l15_ir_134, ir_134_rad);

        // Calculate score
        int score = 0;

        // IR 10.7 critical value: cloud top at/below freezing level
        if (ir_108_temp <= 0.0) {
            score++;
        }

        // WV 6.2 - IR 10.8 channel difference
        T delta_wv_062_ir_108 = wv_062_temp - ir_108_temp;
#if DEBUG_CI_SCORE
        m_score_62_108->set(g, delta_wv_062_ir_108);
#endif
        if (m_super_params.ci_use_walker_mecikalski) {
            if (delta_wv_062_ir_108 >= -35.0 && delta_wv_062_ir_108 <= -10.0) score++;
        } else {
            if (delta_wv_062_ir_108 <= 2.0) score++;
        }

        // IR 13.3 - IR 10.7
        T delta_ir_134_ir_108 = ir_134_temp - ir_108_temp;
#if DEBUG_CI_SCORE
        m_score_134_108->set(g, delta_ir_134_ir_108);
#endif
        if (m_super_params.ci_use_walker_mecikalski) {
            if (delta_ir_134_ir_108 >= -25.0 && delta_ir_134_ir_108 <= -5.0) score++;
        } else {
            // NOTE(review): this is delta >= -2.0 (negated form), while the
            // 6.2-10.8 test above uses delta <= 2.0 -- confirm the asymmetry
            // is intentional.
            if (-delta_ir_134_ir_108 <= 2.0) score++;
        }

        if (m_super_params.ci_comparison_file != NULL) {
            // WARNING: this assumes the time difference is 15 mins!!
            // TODO: adapt the calculation for different intervals?
            // Fetch the same three channels from the previous timestep so
            // that 15-minute cooling/trend tests can be scored.
            T ir_108_radiance_prev
                = this->m_ci_comparison_data_store->get(msevi_l15_ir_108, g, isInRange, isValid);
            T ir_108_temp_prev = brightness_temperature(msevi_l15_ir_108, ir_108_radiance_prev);

            T wv_062_rad_prev
                = this->m_ci_comparison_data_store->get(msevi_l15_wv_062, g, isInRange, isValid);
            T wv_062_temp_prev = brightness_temperature(msevi_l15_wv_062, wv_062_rad_prev);

            T ir_134_rad_prev
                = this->m_ci_comparison_data_store->get(msevi_l15_ir_134, g, isInRange, isValid);
            T ir_134_temp_prev = brightness_temperature(msevi_l15_ir_134, ir_134_rad_prev);

            // 10.8um cooling trend (cloud top rising/cooling)
            T dT1 = ir_108_temp - ir_108_temp_prev;
            if (m_super_params.ci_use_walker_mecikalski) {
                if (dT1 <= -4.0) score++;
            } else {
                if (dT1 <= -2.0) score++;
            }

            // Trend of the 6.2-10.8 difference
            T dT2 = (wv_062_temp - ir_108_temp) - (wv_062_temp_prev - ir_108_temp_prev);
            if (m_super_params.ci_use_walker_mecikalski) {
                if (dT2 >= 3.0) score++;
            } else {
                if (dT2 >= 1.0) score++;
            }

            // Trend of the 13.4-10.8 difference
            T dT3 = (ir_134_temp - ir_108_temp) - (ir_134_temp_prev - ir_108_temp_prev);
            if (m_super_params.ci_use_walker_mecikalski) {
                if (dT3 > 3.0) score++;
            } else {
                if (dT3 >= 1.0) score++;
            }

#if DEBUG_CI_SCORE
            m_score_108_trend->set(g, dT1);
            m_62_108_trend->set(g, dT2);
            m_134_108_trend->set(g, dT3);
#endif
        }

        if (!m_super_params.ci_satellite_only) {
            // Is radar signature > 25dBZ and/or lightning present in 5km radius?
            bool has_lightning = false;
            bool has_radar = false;

            // Search the spatial index for neighbours within the configured
            // radius (m_search_params); loop exits early once either radar
            // or lightning has been found.
            typename Point<T>::list *neighbors = m_index->search(p->coordinate, m_search_params);

            T cband_rx, linet_count;
            for (size_t pi = 0; pi < neighbors->size() && !(has_lightning || has_radar); pi++) {
                typename Point<T>::ptr n = neighbors->at(pi);
                bool neighbour_is_in_range = false;
                bool neighbour_is_valid = false;

                if (!has_radar) {
                    cband_rx = this->m_data_store->get(cband_radolan_rx, n->gridpoint,
                                                       neighbour_is_in_range, neighbour_is_valid);
                    // Start using radar at light rain (>= 25dBZ)
                    has_radar = (neighbour_is_valid && cband_rx >= 25.0 && cband_rx <= 65);
                }

                // NOTE(review): this guard re-tests has_radar; presumably it
                // was meant to be !has_lightning. Behaviour is unchanged in
                // practice because the loop condition exits on either flag,
                // but confirm and fix for clarity.
                if (!has_radar) {
                    neighbour_is_valid = false;
                    neighbour_is_in_range = false;
                    linet_count = this->m_data_store->get(linet_oase_tl, n->gridpoint,
                                                          neighbour_is_in_range, neighbour_is_valid);
                    has_lightning = (neighbour_is_valid && linet_count > 0.0);
                }
            }

            delete neighbors;

            // If any lightning is present in 5km radius:
            // increase score
            if (has_lightning) {
                score++;
            }

            // If light rain or more is present in 5km radius:
            // increase score to max
            // (implicit T -> int conversion of max_score here)
            if (has_radar) {
                score = max_score;
            }
        }

        return score;
    }

    // Weight-function accessor: returns the precomputed weight at the
    // point's grid position.
    T operator()(const typename Point<T>::ptr p) const
    {
        return m_weight->get(p->gridpoint);
    }
};
}

#endif
utility.h
/* ************************************************************************ * Copyright (C) 2016-2022 Advanced Micro Devices, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ************************************************************************ */ #pragma once #include "hipblas.h" #include <stdbool.h> #ifdef __cplusplus #include "cblas_interface.h" #include "complex.hpp" #include "hipblas_datatype2string.hpp" #include <cmath> #include <immintrin.h> #include <iostream> #include <random> #include <type_traits> #include <vector> #endif #include <stdio.h> #include <stdlib.h> #ifdef GOOGLE_TEST #include "gtest/gtest.h" #endif /*!\file * \brief provide data initialization, timing, hipblas type <-> lapack char conversion utilities. 
*/ #define CHECK_HIP_ERROR(error) \ do \ { \ hipError_t error__ = (error); \ if(error__ != hipSuccess) \ { \ fprintf(stderr, \ "hip error: '%s'(%d) at %s:%d\n", \ hipGetErrorString(error__), \ error__, \ __FILE__, \ __LINE__); \ exit(EXIT_FAILURE); \ } \ } while(0) #ifdef __cplusplus #ifndef CHECK_HIPBLAS_ERROR #define EXPECT_HIPBLAS_STATUS(status, expected) \ do \ { \ hipblasStatus_t status__ = (status); \ if(status__ != expected) \ { \ fprintf(stderr, \ "hipBLAS error: %s at %s:%d\n", \ hipblasStatusToString(status__), \ __FILE__, \ __LINE__); \ return (status__); \ } \ } while(0) #define CHECK_HIPBLAS_ERROR(STATUS) EXPECT_HIPBLAS_STATUS(STATUS, HIPBLAS_STATUS_SUCCESS) #endif #define BLAS_1_RESULT_PRINT \ do \ { \ if(argus.timing) \ { \ std::cout << "N, hipblas (us), "; \ if(argus.norm_check) \ { \ std::cout << "CPU (us), error"; \ } \ std::cout << std::endl; \ std::cout << N << ',' << gpu_time_used << ','; \ if(argus.norm_check) \ { \ std::cout << cpu_time_used << ','; \ std::cout << hipblas_error; \ } \ std::cout << std::endl; \ } \ } while(0) // Return true if value is NaN template <typename T> inline bool hipblas_isnan(T) { return false; } inline bool hipblas_isnan(double arg) { return std::isnan(arg); } inline bool hipblas_isnan(float arg) { return std::isnan(arg); } inline bool hipblas_isnan(hipblasHalf arg) { return (~arg & 0x7c00) == 0 && (arg & 0x3ff) != 0; } inline bool hipblas_isnan(hipblasComplex arg) { return std::isnan(arg.real()) || std::isnan(arg.imag()); } inline bool hipblas_isnan(hipblasDoubleComplex arg) { return std::isnan(arg.real()) || std::isnan(arg.imag()); } // Helper routine to convert floats into their half equivalent; uses F16C instructions inline hipblasHalf float_to_half(float val) { // return static_cast<hipblasHalf>( _mm_cvtsi128_si32( _mm_cvtps_ph( _mm_set_ss( val ), 0 ) ) uint16_t a = _cvtss_sh(val, 0); return a; } // Helper routine to convert halfs into their floats equivalent; uses F16C instructions inline float 
half_to_float(hipblasHalf val) { // return static_cast<hipblasHalf>(_mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(val), 0))); return _cvtsh_ss(val); } // zero extend lower 16 bits of bfloat16 to convert to IEEE float inline float bfloat16_to_float(hipblasBfloat16 val) { union { uint32_t int32; float fp32; } u = {uint32_t(val.data) << 16}; return u.fp32; } inline hipblasBfloat16 float_to_bfloat16(float f) { hipblasBfloat16 rv; union { float fp32; uint32_t int32; } u = {f}; if(~u.int32 & 0x7f800000) { u.int32 += 0x7fff + ((u.int32 >> 16) & 1); // Round to nearest, round to even } else if(u.int32 & 0xffff) { u.int32 |= 0x10000; // Preserve signaling NaN } rv.data = uint16_t(u.int32 >> 16); return rv; } /* =============================================================================================== */ /* Complex / real helpers. */ template <typename T> static constexpr bool is_complex = false; template <> HIPBLAS_CLANG_STATIC constexpr bool is_complex<hipblasComplex> = true; template <> HIPBLAS_CLANG_STATIC constexpr bool is_complex<hipblasDoubleComplex> = true; // Get base types from complex types. template <typename T, typename = void> struct real_t_impl { using type = T; }; template <typename T> struct real_t_impl<T, std::enable_if_t<is_complex<T>>> { using type = decltype(T{}.real()); }; template <typename T> using real_t = typename real_t_impl<T>::type; /* ============================================================================================ */ /*! 
\brief Random number generator which generates NaN values */ using hipblas_rng_t = std::mt19937; extern hipblas_rng_t hipblas_rng, hipblas_seed; // Reset the seed (mainly to ensure repeatability of failures in a given suite) inline void hipblas_seedrand() { hipblas_rng = hipblas_seed; } class hipblas_nan_rng { // Generate random NaN values template <typename T, typename UINT_T, int SIG, int EXP> static T random_nan_data() { static_assert(sizeof(UINT_T) == sizeof(T), "Type sizes do not match"); union u_t { u_t() {} UINT_T u; T fp; } x; do x.u = std::uniform_int_distribution<UINT_T>{}(hipblas_rng); while(!(x.u & (((UINT_T)1 << SIG) - 1))); // Reject Inf (mantissa == 0) x.u |= (((UINT_T)1 << EXP) - 1) << SIG; // Exponent = all 1's return x.fp; // NaN with random bits } public: // Random integer template <typename T, typename std::enable_if<std::is_integral<T>{}, int>::type = 0> explicit operator T() { return std::uniform_int_distribution<T>{}(hipblas_rng); } explicit operator signed char() { return static_cast<signed char>(std::uniform_int_distribution<int>{}(hipblas_rng)); } // Random NaN double explicit operator double() { return random_nan_data<double, uint64_t, 52, 11>(); } // Random NaN float explicit operator float() { return random_nan_data<float, uint32_t, 23, 8>(); } // Random NaN half (non-template hipblasHalf takes precedence over integer template above) explicit operator hipblasHalf() { return random_nan_data<hipblasHalf, uint16_t, 10, 5>(); } // Random NaN bfloat16 explicit operator hipblasBfloat16() { return random_nan_data<hipblasBfloat16, uint16_t, 7, 8>(); } // Random NaN Complex explicit operator hipblasComplex() { return {float(*this), float(*this)}; } // Random NaN Double Complex explicit operator hipblasDoubleComplex() { return {double(*this), double(*this)}; } // // Currently not needed // // Random complex integers // explicit operator hipblasInt8Complex() // { // return static_cast<int8_t>( // std::uniform_int_distribution<unsigned short>(1, 
3)(hipblas_rng)); // } }; /* ============================================================================================ */ /*! \brief negate a value */ // Can rename to simply "negate" after removing usage of `using namespace std;` template <class T> inline T hipblas_negate(T x) { return -x; } template <> inline hipblasHalf hipblas_negate(hipblasHalf arg) { union { hipblasHalf fp; uint16_t data; } x = {arg}; x.data ^= 0x8000; return x.fp; } template <> inline hipblasBfloat16 hipblas_negate(hipblasBfloat16 x) { x.data ^= 0x8000; return x; } /* ============================================================================================ */ /* generate random number :*/ /*! \brief generate a random number in range [1,2,3,4,5,6,7,8,9,10] */ template <typename T> T random_generator() { // return rand()/( (T)RAND_MAX + 1); return T(rand() % 10 + 1); }; /*! \brief generate a random NaN number */ template <typename T> inline T random_nan_generator() { return T(hipblas_nan_rng{}); } // for hipblasHalf, generate float, and convert to hipblasHalf /*! \brief generate a random number in range [1,2,3] */ template <> inline hipblasHalf random_generator<hipblasHalf>() { return float_to_half(float((rand() % 3 + 1))); // generate an integer number in range [1,2,3] }; // for hipblasBfloat16, generate float, and convert to hipblasBfloat16 template <> inline hipblasBfloat16 random_generator<hipblasBfloat16>() { return float_to_bfloat16( float((rand() % 3 + 1))); // generate an integer number in range [1,2,3] } // for hipblasComplex, generate 2 floats /*! \brief generate two random numbers in range [1,2,3,4,5,6,7,8,9,10] */ template <> inline hipblasComplex random_generator<hipblasComplex>() { return hipblasComplex(rand() % 10 + 1, rand() % 10 + 1); return {float(rand() % 10 + 1), float(rand() % 10 + 1)}; } // for hipblasDoubleComplex, generate 2 doubles /*! 
\brief generate two random numbers in range [1,2,3,4,5,6,7,8,9,10] */ template <> inline hipblasDoubleComplex random_generator<hipblasDoubleComplex>() { return hipblasDoubleComplex(rand() % 10 + 1, rand() % 10 + 1); return {double(rand() % 10 + 1), double(rand() % 10 + 1)}; } /*! \brief generate a random number in range [-1,-2,-3,-4,-5,-6,-7,-8,-9,-10] */ template <typename T> inline T random_generator_negative() { // return rand()/( (T)RAND_MAX + 1); return -T(rand() % 10 + 1); }; // for hipblasHalf, generate float, and convert to hipblasHalf /*! \brief generate a random number in range [-1,-2,-3] */ template <> inline hipblasHalf random_generator_negative<hipblasHalf>() { return float_to_half(-float((rand() % 3 + 1))); }; // for hipblasBfloat16, generate float, and convert to hipblasBfloat16 /*! \brief generate a random number in range [-1,-2,-3] */ template <> inline hipblasBfloat16 random_generator_negative<hipblasBfloat16>() { return float_to_bfloat16(-float((rand() % 3 + 1))); }; // for complex, generate two values, convert both to negative /*! \brief generate a random real value in range [-1, -10] and random * imaginary value in range [-1, -10] */ template <> inline hipblasComplex random_generator_negative<hipblasComplex>() { return {float(-(rand() % 10 + 1)), float(-(rand() % 10 + 1))}; } template <> inline hipblasDoubleComplex random_generator_negative<hipblasDoubleComplex>() { return {double(-(rand() % 10 + 1)), double(-(rand() % 10 + 1))}; } // HPL /*! \brief generate a random number in HPL-like [-0.5,0.5] doubles */ template <typename T> inline T random_hpl_generator() { return std::uniform_real_distribution<double>(-0.5, 0.5)(hipblas_rng); } // for hipblasBfloat16, generate float, and convert to hipblasBfloat16 /*! 
\brief generate a random number in HPL-like [-0.5,0.5] doubles */
template <>
inline hipblasBfloat16 random_hpl_generator()
{
    return hipblasBfloat16(
        float_to_bfloat16(std::uniform_real_distribution<float>(-0.5, 0.5)(hipblas_rng)));
}

/* ============================================================================================ */

/* ============================================================================================ */
/*! \brief Packs strided_batched matrices into groups of 4 in N */
template <typename T>
void hipblas_packInt8(
    std::vector<T>& A, size_t M, size_t N, size_t lda, size_t batch_count = 1, size_t stride_a = 0)
{
    // NOTE(review): on a bad dimension this only prints to stderr and then
    // proceeds to pack anyway -- confirm whether an early return (or assert)
    // was intended.
    if(N % 4 != 0)
        std::cerr << "ERROR: dimension must be a multiple of 4 in order to pack" << std::endl;

    // Work on a copy so reads are not disturbed by the in-place writes.
    std::vector<T> temp(A);
    for(size_t b = 0; b < batch_count; b++)
        for(size_t colBase = 0; colBase < N; colBase += 4)
            for(size_t row = 0; row < lda; row++)
                for(size_t colOffset = 0; colOffset < 4; colOffset++)
                    A[(colBase * lda + 4 * row) + colOffset + (stride_a * b)]
                        = temp[(colBase + colOffset) * lda + row + (stride_a * b)];
}

// Raw-pointer overload: packs a single matrix from temp into A
// (caller supplies the scratch copy).
template <typename T>
void hipblas_packInt8(T* A, const T* temp, size_t M, size_t N, size_t lda)
{
    if(N % 4 != 0)
        std::cerr << "ERROR: dimension must be a multiple of 4 in order to pack" << std::endl;

    for(size_t colBase = 0; colBase < N; colBase += 4)
        for(size_t row = 0; row < lda; row++)
            for(size_t colOffset = 0; colOffset < 4; colOffset++)
                A[(colBase * lda + 4 * row) + colOffset] = temp[(colBase + colOffset) * lda + row];
}

/* ============================================================================================ */

/* ============================================================================================ */
/*!
\brief matrix/vector initialization: */ // for vector x (M=1, N=lengthX, lda=incx); // for complex number, the real/imag part would be initialized with the same value template <typename T> void hipblas_init( std::vector<T>& A, int M, int N, int lda, hipblasStride stride = 0, int batch_count = 1) { for(int b = 0; b < batch_count; b++) for(int i = 0; i < M; ++i) for(int j = 0; j < N; ++j) A[i + j * lda + b * stride] = random_generator<T>(); } template <typename T> void hipblas_init(T* A, int M, int N, int lda, hipblasStride stride = 0, int batch_count = 1) { for(int b = 0; b < batch_count; b++) for(int i = 0; i < M; ++i) for(int j = 0; j < N; ++j) A[i + j * lda + b * stride] = random_generator<T>(); } template <typename T> void hipblas_init_alternating_sign(std::vector<T>& A, int M, int N, int lda) { // Initialize matrix so adjacent entries have alternating sign. // In gemm if either A or B are initialized with alernating // sign the reduction sum will be summing positive // and negative numbers, so it should not get too large. // This helps reduce floating point inaccuracies for 16bit // arithmetic where the exponent has only 5 bits, and the // mantissa 10 bits. for(int i = 0; i < M; ++i) for(int j = 0; j < N; ++j) if(j % 2 ^ i % 2) A[i + j * lda] = random_generator<T>(); else A[i + j * lda] = random_generator_negative<T>(); } template <typename T> void hipblas_init_alternating_sign( std::vector<T>& A, int M, int N, int lda, hipblasStride stride, int batch_count) { // Initialize matrix so adjacent entries have alternating sign. // In gemm if either A or B are initialized with alernating // sign the reduction sum will be summing positive // and negative numbers, so it should not get too large. // This helps reduce floating point inaccuracies for 16bit // arithmetic where the exponent has only 5 bits, and the // mantissa 10 bits. 
for(int i_batch = 0; i_batch < batch_count; i_batch++) for(int i = 0; i < M; ++i) for(int j = 0; j < N; ++j) if(j % 2 ^ i % 2) A[i + j * lda + i_batch * stride] = random_generator<T>(); else A[i + j * lda + i_batch * stride] = random_generator_negative<T>(); } // Initialize matrix so adjacent entries have alternating sign. template <typename T> void hipblas_init_hpl_alternating_sign( T* A, size_t M, size_t N, size_t lda, size_t stride = 0, size_t batch_count = 1) { for(size_t i_batch = 0; i_batch < batch_count; i_batch++) #pragma omp parallel for for(size_t j = 0; j < N; ++j) { size_t offset = j * lda + i_batch * stride; for(size_t i = 0; i < M; ++i) { auto value = random_hpl_generator<T>(); A[i + offset] = (i ^ j) & 1 ? value : hipblas_negate(value); } } } template <typename T> void hipblas_init_hpl_alternating_sign( std::vector<T>& A, size_t M, size_t N, size_t lda, size_t stride = 0, size_t batch_count = 1) { hipblas_init_hpl_alternating_sign(A.data(), M, N, lda, stride, batch_count); } // Initialize vector with HPL-like random values template <typename T> void hipblas_init_hpl( std::vector<T>& A, size_t M, size_t N, size_t lda, size_t stride = 0, size_t batch_count = 1) { for(size_t i_batch = 0; i_batch < batch_count; i_batch++) for(size_t i = 0; i < M; ++i) for(size_t j = 0; j < N; ++j) A[i + j * lda + i_batch * stride] = random_hpl_generator<T>(); } template <typename T> void hipblas_init_hpl( T* A, size_t M, size_t N, size_t lda, size_t stride = 0, size_t batch_count = 1) { for(size_t i_batch = 0; i_batch < batch_count; i_batch++) for(size_t i = 0; i < M; ++i) for(size_t j = 0; j < N; ++j) A[i + j * lda + i_batch * stride] = random_hpl_generator<T>(); } template <typename T> inline void hipblas_init_cos(T* A, size_t M, size_t N, size_t lda, size_t stride, size_t batch_count) { for(size_t i_batch = 0; i_batch < batch_count; i_batch++) #pragma omp parallel for for(size_t j = 0; j < N; ++j) { size_t offset = j * lda + i_batch * stride; for(size_t i = 0; i < M; 
++i) A[i + offset] = T(cos(i + offset)); } } template <typename T> inline void hipblas_init_cos( std::vector<T>& A, size_t M, size_t N, size_t lda, size_t stride, size_t batch_count) { hipblas_init_cos(A.data(), M, N, lda, stride, batch_count); } template <typename T> inline void hipblas_init_sin(T* A, size_t M, size_t N, size_t lda, size_t stride, size_t batch_count) { for(size_t i_batch = 0; i_batch < batch_count; i_batch++) #pragma omp parallel for for(size_t j = 0; j < N; ++j) { size_t offset = j * lda + i_batch * stride; for(size_t i = 0; i < M; ++i) A[i + offset] = T(sin(i + offset)); } } template <typename T> inline void hipblas_init_sin( std::vector<T>& A, size_t M, size_t N, size_t lda, size_t stride, size_t batch_count) { hipblas_init_sin(A.data(), M, N, lda, stride, batch_count); } template <> inline void hipblas_init_cos<hipblasBfloat16>( hipblasBfloat16* A, size_t M, size_t N, size_t lda, size_t stride, size_t batch_count) { for(size_t i_batch = 0; i_batch < batch_count; i_batch++) #pragma omp parallel for for(size_t j = 0; j < N; ++j) { size_t offset = j * lda + i_batch * stride; for(size_t i = 0; i < M; ++i) A[i + offset] = float_to_bfloat16(cos(i + offset)); } } template <> inline void hipblas_init_cos<hipblasBfloat16>(std::vector<hipblasBfloat16>& A, size_t M, size_t N, size_t lda, size_t stride, size_t batch_count) { hipblas_init_cos<hipblasBfloat16>(A.data(), M, N, lda, stride, batch_count); } template <> inline void hipblas_init_sin<hipblasBfloat16>( hipblasBfloat16* A, size_t M, size_t N, size_t lda, size_t stride, size_t batch_count) { for(size_t i_batch = 0; i_batch < batch_count; i_batch++) #pragma omp parallel for for(size_t j = 0; j < N; ++j) { size_t offset = j * lda + i_batch * stride; for(size_t i = 0; i < M; ++i) A[i + offset] = float_to_bfloat16(sin(i + offset)); } } template <> inline void hipblas_init_sin<hipblasBfloat16>(std::vector<hipblasBfloat16>& A, size_t M, size_t N, size_t lda, size_t stride, size_t batch_count) { 
hipblas_init_sin<hipblasBfloat16>(A.data(), M, N, lda, stride, batch_count); } /*! \brief symmetric matrix initialization: */ // for real matrix only template <typename T> void hipblas_init_symmetric(std::vector<T>& A, int N, int lda) { for(int i = 0; i < N; ++i) for(int j = 0; j <= i; ++j) { auto r = random_generator<T>(); A[j + i * lda] = r; A[i + j * lda] = r; } } /*! \brief symmetric matrix initialization for strided_batched matricies: */ template <typename T> void hipblas_init_symmetric( std::vector<T>& A, int N, int lda, hipblasStride strideA, int batch_count) { for(int b = 0; b < batch_count; b++) for(int off = b * strideA, i = 0; i < N; ++i) for(int j = 0; j <= i; ++j) { auto r = random_generator<T>(); A[i + j * lda + off] = r; A[j + i * lda + off] = r; } } /*! \brief hermitian matrix initialization: */ // for complex matrix only, the real/imag part would be initialized with the same value // except the diagonal elment must be real template <typename T> void hipblas_init_hermitian(std::vector<T>& A, int N, int lda) { for(int i = 0; i < N; ++i) for(int j = 0; j <= i; ++j) if(i == j) A[j + i * lda] = random_generator<real_t<T>>(); else A[j + i * lda] = A[i + j * lda] = random_generator<T>(); } /* ============================================================================================ */ /*! \brief Initialize an array with random data, with NaN where appropriate */ template <typename T> inline void hipblas_init_nan(T* A, size_t N) { for(size_t i = 0; i < N; ++i) A[i] = T(hipblas_nan_rng()); } template <typename T> inline void hipblass_init_nan( std::vector<T>& A, size_t M, size_t N, size_t lda, size_t stride = 0, size_t batch_count = 1) { for(size_t i_batch = 0; i_batch < batch_count; i_batch++) for(size_t i = 0; i < M; ++i) for(size_t j = 0; j < N; ++j) A[i + j * lda + i_batch * stride] = T(hipblas_nan_rng()); } /* ============================================================================================= */ /*! 
\brief For testing purposes, to convert a regular matrix to a packed matrix. */ template <typename T> inline void regular_to_packed(bool upper, const T* A, T* AP, int n) { int index = 0; if(upper) for(int i = 0; i < n; i++) for(int j = 0; j <= i; j++) AP[index++] = A[j + i * n]; else for(int i = 0; i < n; i++) for(int j = i; j < n; j++) AP[index++] = A[j + i * n]; } /* ============================================================================ */ /* \brief For testing purposes, to convert a regular matrix to a banded matrix. */ template <typename T> inline void regular_to_banded(bool upper, const T* A, int lda, T* AB, int ldab, int n, int k) { // convert regular hA matrix to banded hAB matrix. for(int j = 0; j < n; j++) { int min1 = upper ? std::max(0, j - k) : j; int max1 = upper ? j : std::min(n - 1, j + k); int m = upper ? k - j : -j; // Move bands of hA into new banded hAB format. for(int i = min1; i <= max1; i++) AB[j * ldab + (m + i)] = A[j * lda + i]; min1 = upper ? k + 1 : std::min(k + 1, n - j); max1 = ldab - 1; // fill in bottom with random data to ensure we aren't using it. // for !upper, fill in bottom right triangle as well. for(int i = min1; i <= max1; i++) hipblas_init<T>(AB + j * ldab + i, 1, 1, 1); // for upper, fill in top left triangle with random data to ensure // we aren't using it. if(upper) { for(int i = 0; i < m; i++) hipblas_init<T>(AB + j * ldab + i, 1, 1, 1); } } } /* ============================================================================== */ /* \brief For testing purposes, zeros out elements not needed in a banded matrix. */ template <typename T> inline void banded_matrix_setup(bool upper, T* A, int lda, int n, int k) { // Make A a banded matrix with k sub/super diagonals. 
for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { if(upper && (j > k + i || i > j)) A[j * n + i] = T(0); else if(!upper && (i > k + j || j > i)) A[j * n + i] = T(0); } } } /* ============================================================================================= */ /*! \brief For testing purposes, makes a matrix hA into a unit_diagonal matrix and * * randomly initialize the diagonal. */ template <typename T> void make_unit_diagonal(hipblasFillMode_t uplo, T* hA, int lda, int N) { if(uplo == HIPBLAS_FILL_MODE_LOWER) { for(int i = 0; i < N; i++) { T diag = hA[i + i * N]; for(int j = 0; j <= i; j++) hA[i + j * lda] = hA[i + j * lda] / diag; } } else // rocblas_fill_upper { for(int j = 0; j < N; j++) { T diag = hA[j + j * lda]; for(int i = 0; i <= j; i++) hA[i + j * lda] = hA[i + j * lda] / diag; } } // randomly initalize diagonal to ensure we aren't using it's values for tests. for(int i = 0; i < N; i++) { hipblas_init<T>(hA + i * lda + i, 1, 1, 1); } } /* ============================================================================================= */ /*! \brief For testing purposes, prepares matrix hA for a triangular solve. * * Makes hA strictly diagonal dominant (SPD), then calculates Cholesky factorization * * of hA. */ template <typename T> void prepare_triangular_solve(T* hA, int lda, T* AAT, int N, char char_uplo) { // calculate AAT = hA * hA ^ T cblas_gemm<T>(HIPBLAS_OP_N, HIPBLAS_OP_C, N, N, N, T(1.0), hA, lda, hA, lda, T(0.0), AAT, lda); // copy AAT into hA, make hA strictly diagonal dominant, and therefore SPD for(int i = 0; i < N; i++) { T t = 0.0; for(int j = 0; j < N; j++) { hA[i + j * lda] = AAT[i + j * lda]; t += std::abs(AAT[i + j * lda]); } hA[i + i * lda] = t; } // calculate Cholesky factorization of SPD matrix hA cblas_potrf<T>(char_uplo, N, hA, lda); } /* ============================================================================================ */ /*! 
\brief turn float -> 's', double -> 'd', hipblas_float_complex -> 'c', hipblas_double_complex * -> 'z' */ template <typename T> char type2char(); /* ============================================================================================ */ /*! \brief turn float -> int, double -> int, hipblas_float_complex.real() -> int, * hipblas_double_complex.real() -> int */ template <typename T> int type2int(T val); /* ============================================================================================ */ /*! \brief Debugging purpose, print out CPU and GPU result matrix, not valid in complex number */ template <typename T, std::enable_if_t<!is_complex<T>, int> = 0> void print_matrix( const std::vector<T>& CPU_result, const std::vector<T>& GPU_result, int m, int n, int lda) { for(int i = 0; i < m; i++) for(int j = 0; j < n; j++) printf("matrix col %d, row %d, CPU result=%.8g, GPU result=%.8g\n", i, j, double(CPU_result[j + i * lda]), double(GPU_result[j + i * lda])); } /*! \brief Debugging purpose, print out CPU and GPU result matrix, valid for complex number */ template <typename T, std::enable_if_t<+is_complex<T>, int> = 0> void print_matrix( const std::vector<T>& CPU_result, const std::vector<T>& GPU_result, int m, int n, int lda) { for(int i = 0; i < m; i++) for(int j = 0; j < n; j++) printf("matrix col %d, row %d, CPU result=(%.8g,%.8g), GPU result=(%.8g,%.8g)\n", i, j, double(CPU_result[j + i * lda].real()), double(CPU_result[j + i * lda].imag()), double(GPU_result[j + i * lda].real()), double(GPU_result[j + i * lda].imag())); } /* =============================================================================================== */ /* ============================================================================================ */ // Return path of this executable std::string hipblas_exepath(); #endif // __cplusplus #ifdef __cplusplus extern "C" { #endif /* ============================================================================================ */ /* device 
query and print out their ID and name */ int query_device_property(); /* set current device to device_id */ void set_device(int device_id); /* get architecture number */ int getArch(); /* query what rocBLAS recommends for int8 layout. We are /always/ passing in the flag which * rocBLAS recommends, thus we need to know what layout to format our data in our tests. * returns true if should be packed. */ bool layout_pack_int8(); /* ============================================================================================ */ /* timing: HIP only provides very limited timers function clock() and not general; hipblas sync CPU and device and use more accurate CPU timer*/ /*! \brief CPU Timer(in microsecond): synchronize with the default device and return wall time */ double get_time_us(void); /*! \brief CPU Timer(in microsecond): synchronize with given queue/stream and return wall time */ double get_time_us_sync(hipStream_t stream); #ifdef __cplusplus } #endif /* ============================================================================================ */ #ifdef __cplusplus struct Arguments; /* ============================================================================================ */ /*! \brief local handle which is automatically created and destroyed */ class hipblasLocalHandle { hipblasHandle_t m_handle; void* m_memory = nullptr; public: hipblasLocalHandle(); explicit hipblasLocalHandle(const Arguments& arg); ~hipblasLocalHandle(); hipblasLocalHandle(const hipblasLocalHandle&) = delete; hipblasLocalHandle(hipblasLocalHandle&&) = delete; hipblasLocalHandle& operator=(const hipblasLocalHandle&) = delete; hipblasLocalHandle& operator=(hipblasLocalHandle&&) = delete; // Allow hipblasLocalHandle to be used anywhere hipblas_handle is expected operator hipblasHandle_t&() { return m_handle; } operator const hipblasHandle_t&() const { return m_handle; } }; #include "hipblas_arguments.hpp" #endif // __cplusplus
proc_frndstr.c
#include "q_incs.h"
#include "_mmap.h"

#define NODE_TYPE uint32_t
#define MAXLINE 65535

/* Reads an adjacency list ("node_id,nbr,nbr,..." per line) and, when argv[3]
 * is "y", serializes it into three flat binary files:
 *   <prefix>_lb.bin   -- per node, start offset into the connection array
 *   <prefix>_ub.bin   -- per node, one-past-end offset into the connection array
 *   <prefix>_conn.bin -- concatenated neighbor ids
 * It then mmaps the three files and runs iterative min-label propagation
 * (each node takes the minimum label among itself and its neighbors) until
 * no label changes -- i.e. connected-component labeling.
 *
 * Usage: <exe> <infile> <prefix> <y|n>.  Returns 0 on success.
 */
int main( int argc, char **argv )
{
  int status = 0;
  char *infile;
  char *prefix;
  FILE *fp = NULL;
  FILE *lbfp = NULL;
  FILE *ubfp = NULL;
  FILE *connfp = NULL;
  char *buf = NULL;
  char line[MAXLINE+1];
  NODE_TYPE *lbl = NULL;
  NODE_TYPE *lb = NULL;
  NODE_TYPE *ub = NULL;
  NODE_TYPE *conn = NULL;
  char *lb_X = NULL;   size_t lb_nX = 0;
  char *ub_X = NULL;   size_t ub_nX = 0;
  char *conn_X = NULL; size_t conn_nX = 0;
  NODE_TYPE idx = 0;

  if ( argc != 4 ) { go_BYE(-1); }
  infile = argv[1];
  prefix = argv[2];
  // room for prefix + longest suffix ("_conn.bin") + NUL
  buf = malloc(strlen(prefix) + 16);
  return_if_malloc_failed(buf);  // was unchecked

  if ( strcasecmp(argv[3], "y") == 0 ) {
    sprintf(buf, "%s_lb.bin", prefix);
    lbfp = fopen(buf, "wb");
    return_if_fopen_failed(lbfp, buf, "wb");
    sprintf(buf, "%s_ub.bin", prefix);
    ubfp = fopen(buf, "wb");
    return_if_fopen_failed(ubfp, buf, "wb");
    sprintf(buf, "%s_conn.bin", prefix);
    connfp = fopen(buf, "wb");
    return_if_fopen_failed(connfp, buf, "wb");
    fp = fopen(infile, "r");
    return_if_fopen_failed(fp, infile, "r");
    int lno = 0;
    for ( ; !feof(fp); lno++ ) {
      memset(line, '\0', MAXLINE+1);
      char *cptr = fgets(line, MAXLINE, fp);
      if ( line[MAXLINE-1] != '\0' ) { go_BYE(-1); } // over-long line
      if ( cptr == NULL ) { break; }
      if ( strlen(line) == 0 ) { break; }
      // first field is the node's own id; only its position (line order)
      // matters, the value itself is not stored
      char *xptr = strtok(line, ",");
      NODE_TYPE node_id = atoll(xptr);
      fwrite(&idx, sizeof(NODE_TYPE), 1, lbfp);
      for ( ; ; ) {
        xptr = strtok(NULL, ",");
        if ( xptr == NULL ) { break; }
        node_id = atoll(xptr);
        fwrite(&node_id, sizeof(NODE_TYPE), 1, connfp);
        idx++;
      }
      fwrite(&idx, sizeof(NODE_TYPE), 1, ubfp);
    }
    // Close (flushing stdio buffers) BEFORE the mmap below, and NULL the
    // handles so the cleanup at BYE: does not fclose them a second time.
    // BUG FIX: connfp was previously left open here, so conn.bin could be
    // mmapped with unflushed data; fp/lbfp/ubfp were closed but not NULLed,
    // causing a double fclose at BYE.
    fclose_if_non_null(fp);     fp     = NULL;
    fclose_if_non_null(lbfp);   lbfp   = NULL;
    fclose_if_non_null(ubfp);   ubfp   = NULL;
    fclose_if_non_null(connfp); connfp = NULL;
    fprintf(stderr, "Read %d lines \n", lno);
  }

  sprintf(buf, "%s_lb.bin", prefix);
  status = rs_mmap(buf, &lb_X, &lb_nX, 0); cBYE(status);
  sprintf(buf, "%s_ub.bin", prefix);
  status = rs_mmap(buf, &ub_X, &ub_nX, 0); cBYE(status);
  sprintf(buf, "%s_conn.bin", prefix);
  status = rs_mmap(buf, &conn_X, &conn_nX, 0); cBYE(status);

  uint64_t n = lb_nX / sizeof(NODE_TYPE);
  fprintf(stderr, "Working on %llu nodes \n", (unsigned long long)n);
  lbl = malloc(n * sizeof(NODE_TYPE));
  return_if_malloc_failed(lbl);
  // every node starts in its own component; NODE_TYPE is 32-bit, so this
  // assumes n <= UINT32_MAX
  for ( uint64_t i = 0; i < n; i++ ) { lbl[i] = (NODE_TYPE)i; }
  lb   = (NODE_TYPE *)lb_X;
  ub   = (NODE_TYPE *)ub_X;
  conn = (NODE_TYPE *)conn_X;

  int num_changed = -1; // non-zero just to enter the loop the first time
  for ( int iter = 0; num_changed != 0; iter++ ) {
    num_changed = 0;
    // BUG FIX: num_changed++ was a data race across threads; the reduction
    // makes the counter exact.  Concurrent lbl[] reads/writes remain: a stale
    // read only delays convergence by a pass, it cannot corrupt the result.
#pragma omp parallel for reduction(+:num_changed)
    for ( uint64_t i = 0; i < n; i++ ) {
      if ( ub[i] <= lb[i] ) { continue; } // node with no neighbors
      NODE_TYPE minval = lbl[i];
      for ( uint64_t j = lb[i]; j < ub[i]; j++ ) {
        minval = mcr_min(minval, lbl[conn[j]]);
      }
      if ( lbl[i] != minval ) { num_changed++; lbl[i] = minval; }
    }
    fprintf(stderr, "Pass %d, num_changed = %d \n", iter, num_changed);
  }
BYE:
  // BUG FIX: each region is unmapped with its own length (all three
  // previously used lb_nX)
  if ( lb_X   != NULL ) { munmap(lb_X,   lb_nX);   }
  if ( ub_X   != NULL ) { munmap(ub_X,   ub_nX);   }
  if ( conn_X != NULL ) { munmap(conn_X, conn_nX); }
  free_if_non_null(buf);
  free_if_non_null(lbl);  // was leaked
  fclose_if_non_null(fp);
  fclose_if_non_null(lbfp);
  fclose_if_non_null(ubfp);
  fclose_if_non_null(connfp);
  return status;
}
mdc2_fmt_plug.c
/* * Cracker for MDC-2 (MDC-2DES) hashes. * * This software is Copyright (c) 2014 Dhiru Kholia <dhiru at openwall.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without# * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_mdc2; #elif FMT_REGISTERS_H john_register_one(&fmt_mdc2); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 2048 // XXX #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "params.h" #include "options.h" #include "memdbg.h" #include "mdc2-JtR.h" #define FORMAT_LABEL "mdc2" #define FORMAT_NAME "MDC-2" #define FORMAT_TAG "$mdc2$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "MDC-2DES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_SIZE 0 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"$mdc2$000ed54e093d61679aefbeae05bfe33a", "The quick brown fox jumps over the lazy dog"}, {"775f59f8e51aec29c57ac6ab850d58e8", "The quick brown fox jumps over the lazy cog"}, {"52525252525252522525252525252525", ""}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *saved_len; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_num_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); 
} static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; q = p; while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q && q - p == BINARY_SIZE * 2; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p = ciphertext; int i; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = ciphertext + TAG_LENGTH; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { JtR_MDC2_CTX ctx; JtR_MDC2_Init(&ctx); JtR_MDC2_Update(&ctx, (unsigned char*)saved_key[index], saved_len[index]); JtR_MDC2_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void mdc2_set_key(char *key, int index) { saved_len[index] = strlen(key); strncpy(saved_key[index], key, sizeof(saved_key[0])); } 
/* Return the stored candidate for index (NUL-terminated by set_key). */
static char *get_key(int index)
{
	return saved_key[index];
}

/* Format descriptor registered with John the Ripper; wires the functions
 * above into the generic cracking loop.  Field order is dictated by
 * struct fmt_main in formats.h. */
struct fmt_main fmt_mdc2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		mdc2_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
PVMappingFilterTime24h.h
/*
 * MIT License
 *
 * © ESI Group, 2015
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef PVFILTER_PVMAPPINGFILTERTIME24H_H
#define PVFILTER_PVMAPPINGFILTERTIME24H_H

#include <inendi/PVMappingFilter.h>
#include <pvkernel/rush/PVNraw.h>

#include <boost/date_time/posix_time/posix_time.hpp>
#include <unicode/calendar.h>
#include <pvcop/types/datetime.h>

#include <omp.h>

using namespace icu_67;

namespace Inendi
{

// Mapping filter that projects a time value onto its position within a
// 24-hour day, as milliseconds since midnight (0 .. 24*60*60*1000 - 1).
// The date part is discarded; only the time-of-day contributes.
class PVMappingFilterTime24h : public PVMappingFilter
{
  public:
	PVMappingFilterTime24h();

  public:
	// Maps column `col` of `nraw` to a uint32 array of milliseconds-since-
	// midnight.  Dispatch is on the column formatter name: "datetime",
	// "datetime_us" or "datetime_ms" (asserted to be one of these three).
	pvcop::db::array operator()(PVCol const col, PVRush::PVNraw const& nraw) override
	{
		auto f = nraw.column(col).formatter();
		const pvcop::db::array& array = nraw.column(col);

		pvcop::db::array dest("number_uint32", array.size());
		auto& dest_array = dest.to_core_array<uint32_t>();

		if (std::string(f->name()) == "datetime") {
			// Values stored as uint32_t and broken down with gmtime_r, i.e.
			// treated as epoch seconds interpreted in UTC.
			auto& core_array = array.to_core_array<uint32_t>();
#pragma omp parallel for
			for (size_t row = 0; row < array.size(); row++) {
				tm local_tm;
				const time_t t = static_cast<int64_t>(core_array[row]);
				pvcop::types::formatter_datetime::gmtime_r(&t, &local_tm);

				dest_array[row] =
				    (((local_tm.tm_hour * 60) + local_tm.tm_min) * 60 + local_tm.tm_sec) * 1000;
			}
		} else if (std::string(f->name()) == "datetime_us") {
			// boost ptime: take the time-of-day directly.
			// NOTE(review): fractional_seconds()/1000 assumes microsecond
			// tick resolution in this boost build -- confirm.
			auto& core_array = array.to_core_array<boost::posix_time::ptime>();
#pragma omp parallel for
			for (size_t row = 0; row < array.size(); row++) {
				const boost::posix_time::ptime t = core_array[row];
				const auto& tod = t.time_of_day();
				dest_array[row] = (tod.total_seconds() * 1000) + (tod.fractional_seconds() / 1000);
			}
		} else {
			assert(std::string(f->name()) == "datetime_ms" && "Unknown datetime formatter");
			auto& core_array = array.to_core_array<uint64_t>();

			// ICU Calendar is not thread-safe: build one instance per OpenMP
			// thread up front (creation may fail, hence the throw).
			std::vector<std::unique_ptr<Calendar>> calendars;
			for (size_t i = 0; i < (size_t)omp_get_max_threads(); i++) {
				UErrorCode err = U_ZERO_ERROR;
				calendars.emplace_back(Calendar::createInstance(err));
				if (not U_SUCCESS(err)) {
					throw std::runtime_error("Can't create calendar to compute mapping");
				}
			}

#pragma omp parallel
			{
				std::unique_ptr<Calendar>& cal = calendars[omp_get_thread_num()];
				UErrorCode err = U_ZERO_ERROR;

#pragma omp for
				for (size_t row = 0; row < array.size(); row++) {
					// On any ICU error the row is skipped, leaving its
					// dest_array entry at the default value.
					cal->setTime(core_array[row], err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					int32_t millisec = cal->get(UCAL_MILLISECOND, err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					int32_t sec = cal->get(UCAL_SECOND, err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					int32_t min = cal->get(UCAL_MINUTE, err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					int32_t hour = cal->get(UCAL_HOUR_OF_DAY, err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					dest_array[row] =
					    ((sec + (min * 60) + (hour * 60 * 60)) * 1000) + millisec;
				}
			}
		}

		return dest;
	}

	std::unordered_set<std::string> list_usable_type() const override { return {"time"}; }

	QString get_human_name() const override { return QString("24h"); }

	// Fixed range of the mapping: [0, last millisecond of the day].
	pvcop::db::array get_minmax(pvcop::db::array const&,
	                            pvcop::db::selection const&) const override
	{
		pvcop::db::array res("number_uint32", 2);
		auto res_array = res.to_core_array<uint32_t>();

		res_array[0] = 0;
		res_array[1] = (24 * 60 * 60 * 1000) - 1;

		return res;
	}

	CLASS_FILTER_NOPARAM(PVMappingFilterTime24h)
};
}

#endif
LAGraph_Sort1.c
//------------------------------------------------------------------------------
// LAGraph_Sort1: sort a list of integers
//------------------------------------------------------------------------------

// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause

// Contributed by Tim Davis, Texas A&M University.

//------------------------------------------------------------------------------

// A parallel mergesort of an array of n integers.

#define LAGraph_FREE_ALL LAGraph_Free ((void **) &W) ;

#include "LG_internal.h"

//------------------------------------------------------------------------------
// prototype only needed for LAGraph_Sort1
//------------------------------------------------------------------------------

void LG_msort_1b_create_merge_tasks
(
    // output:
    int64_t *LG_RESTRICT L_task,        // L_task [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT L_len,         // L_len [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT R_task,        // R_task [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT R_len,         // R_len [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT S_task,        // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0,                       // first task tid to create
    const int ntasks,                   // # of tasks to create
    const int64_t pS_start,             // merge into S [pS_start...]
    const int64_t *LG_RESTRICT L_0,     // Left = L [pL_start...pL_end-1]
    const int64_t pL_start,
    const int64_t pL_end,
    const int64_t *LG_RESTRICT R_0,     // Right = R [pR_start...pR_end-1]
    const int64_t pR_start,
    const int64_t pR_end
) ;

//------------------------------------------------------------------------------
// LG_msort_1b_binary_search: binary search for the pivot
//------------------------------------------------------------------------------

// The Pivot value is Y [pivot], and a binary search for the Pivot is made in
// the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on
// input.  The return value is pleft, where
//
//    X [p_start ... pleft-1] <= Pivot and
//    X [pleft ... p_end-1] >= Pivot holds.
//
// pleft is returned in the range p_start to p_end.  If pleft is p_start, then
// the Pivot is smaller than all entries in X [p_start...p_end-1], and the left
// list X [p_start...pleft-1] is empty.  If pleft is p_end, then the Pivot is
// larger than all entries in X [p_start...p_end-1], and the right list X
// [pleft...p_end-1] is empty.

static int64_t LG_msort_1b_binary_search    // return pleft
(
    const int64_t *LG_RESTRICT Y_0,         // Pivot is Y [pivot]
    const int64_t pivot,
    const int64_t *LG_RESTRICT X_0,         // search in X [p_start..p_end_-1]
    const int64_t p_start,
    const int64_t p_end
)
{

    //--------------------------------------------------------------------------
    // find where the Pivot appears in X
    //--------------------------------------------------------------------------

    // binary search of X [p_start...p_end-1] for the Pivot
    int64_t pleft = p_start ;
    int64_t pright = p_end - 1 ;
    while (pleft < pright)
    {
        int64_t pmiddle = (pleft + pright) >> 1 ;
        // less = (X [pmiddle] < Pivot)
        bool less = LG_lt_1 (X_0, pmiddle, Y_0, pivot) ;
        // branchless narrowing: keep exactly one of the two halves
        pleft  = less ? (pmiddle+1) : pleft ;
        pright = less ? pright : pmiddle ;
    }

    // binary search is narrowed down to a single item
    // or it has found the list is empty:
    ASSERT (pleft == pright || pleft == pright + 1) ;

    // If found is true then X [pleft == pright] == Pivot.  If duplicates
    // appear then X [pleft] is any one of the entries equal to the Pivot
    // in the list.  If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft+1 ... p_end-1] > Pivot holds.
    //    The value X [pleft] may be either < or > Pivot.
    bool found = (pleft == pright) && LG_eq_1 (X_0, pleft, Y_0, pivot) ;

    // Modify pleft and pright:
    if (!found && (pleft == pright))
    {
        if (LG_lt_1 (X_0, pleft, Y_0, pivot))
        {
            pleft++ ;
        }
        else
        {
//          pright++ ;  // (not needed)
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] > Pivot holds,
    //    and pleft-1 == pright

    // If X has no duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    // If X has duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] <= Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    return (pleft) ;
}

//------------------------------------------------------------------------------
// LG_msort_1b_create_merge_tasks
//------------------------------------------------------------------------------

// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ].  The task tids created are t0 to
// t0+ntasks-1.
void LG_msort_1b_create_merge_tasks
(
    // output:
    int64_t *LG_RESTRICT L_task,        // L_task [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT L_len,         // L_len [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT R_task,        // R_task [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT R_len,         // R_len [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT S_task,        // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0,                       // first task tid to create
    const int ntasks,                   // # of tasks to create
    const int64_t pS_start,             // merge into S [pS_start...]
    const int64_t *LG_RESTRICT L_0,     // Left = L [pL_start...pL_end-1]
    const int64_t pL_start,
    const int64_t pL_end,
    const int64_t *LG_RESTRICT R_0,     // Right = R [pR_start...pR_end-1]
    const int64_t pR_start,
    const int64_t pR_end
)
{

    //--------------------------------------------------------------------------
    // get problem size
    //--------------------------------------------------------------------------

    int64_t nleft  = pL_end - pL_start ;        // size of Left array
    int64_t nright = pR_end - pR_start ;        // size of Right array
    int64_t total_work = nleft + nright ;       // total work to do
    ASSERT (ntasks >= 1) ;
    ASSERT (total_work > 0) ;

    //--------------------------------------------------------------------------
    // create the tasks
    //--------------------------------------------------------------------------

    if (ntasks == 1)
    {

        //----------------------------------------------------------------------
        // a single task will merge all of Left and Right into Sresult
        //----------------------------------------------------------------------

        L_task [t0] = pL_start ; L_len [t0] = nleft ;
        R_task [t0] = pR_start ; R_len [t0] = nright ;
        S_task [t0] = pS_start ;

    }
    else
    {

        //----------------------------------------------------------------------
        // partition the Left and Right arrays for multiple merge tasks
        //----------------------------------------------------------------------

        // The larger list is split at its midpoint, and the matching split
        // point in the other list is found by binary search, so each
        // partition merges two consistent sub-ranges.
        int64_t pleft, pright ;
        if (nleft >= nright)
        {
            // split Left in half, and search for its pivot in Right
            pleft = (pL_end + pL_start) >> 1 ;
            pright = LG_msort_1b_binary_search (
                        L_0, pleft,
                        R_0, pR_start, pR_end) ;
        }
        else
        {
            // split Right in half, and search for its pivot in Left
            pright = (pR_end + pR_start) >> 1 ;
            pleft = LG_msort_1b_binary_search (
                        R_0, pright,
                        L_0, pL_start, pL_end) ;
        }

        //----------------------------------------------------------------------
        // partition the tasks according to the work of each partition
        //----------------------------------------------------------------------

        // work0 is the total work in the first partition
        int64_t work0 = (pleft - pL_start) + (pright - pR_start) ;
        int ntasks0 = (int) round ((double) ntasks *
            (((double) work0) / ((double) total_work))) ;

        // ensure at least one task is assigned to each partition
        ntasks0 = LAGraph_MAX (ntasks0, 1) ;
        ntasks0 = LAGraph_MIN (ntasks0, ntasks-1) ;
        int ntasks1 = ntasks - ntasks0 ;

        //----------------------------------------------------------------------
        // assign ntasks0 to the first half
        //----------------------------------------------------------------------

        // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1]
        // into the result S [pS_start...work0-1].

        LG_msort_1b_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start,
            L_0, pL_start, pleft,
            R_0, pR_start, pright) ;

        //----------------------------------------------------------------------
        // assign ntasks1 to the second half
        //----------------------------------------------------------------------

        // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1]
        // into the result S [pS_start+work0...pS_start+total_work].

        int t1 = t0 + ntasks0 ;     // first task id of the second set of tasks
        int64_t pS_start1 = pS_start + work0 ;  // 2nd set starts here in S
        LG_msort_1b_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1,
            L_0, pleft, pL_end,
            R_0, pright, pR_end) ;
    }
}

//------------------------------------------------------------------------------
// LG_msort_1b_merge: merge two sorted lists via a single thread
//------------------------------------------------------------------------------

// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1]

static void LG_msort_1b_merge
(
    int64_t *LG_RESTRICT S_0,              // output of length nleft + nright
    const int64_t *LG_RESTRICT Left_0,     // left input of length nleft
    const int64_t nleft,
    const int64_t *LG_RESTRICT Right_0,    // right input of length nright
    const int64_t nright
)
{
    int64_t p, pleft, pright ;

    // merge the two inputs, Left and Right, while both inputs exist
    for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++)
    {
        if (LG_lt_1 (Left_0, pleft, Right_0, pright))
        {
            // S [p] = Left [pleft++]
            S_0 [p] = Left_0 [pleft] ;
            pleft++ ;
        }
        else
        {
            // S [p] = Right [pright++]
            S_0 [p] = Right_0 [pright] ;
            pright++ ;
        }
    }

    // either input is exhausted; copy the remaining list into S
    if (pleft < nleft)
    {
        int64_t nremaining = (nleft - pleft) ;
        memcpy (S_0 + p, Left_0 + pleft, nremaining * sizeof (int64_t)) ;
    }
    else if (pright < nright)
    {
        int64_t nremaining = (nright - pright) ;
        memcpy (S_0 + p, Right_0 + pright, nremaining * sizeof (int64_t)) ;
    }
}

//------------------------------------------------------------------------------
// LAGraph_Sort1: parallel mergesort
//------------------------------------------------------------------------------

int LAGraph_Sort1    // sort array A of size n
(
    int64_t *LG_RESTRICT A_0,   // size n array
    const int64_t n,
    int nthreads,               // # of threads to use
    char *msg
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    int64_t *LG_RESTRICT W = NULL ;
    LG_CHECK (A_0 == NULL, -1, "A_0 is NULL") ;

    //--------------------------------------------------------------------------
    // handle small problems with a single thread
    //--------------------------------------------------------------------------

    if (nthreads <= 1 || n <= LG_BASECASE)
    {
        // sequential quicksort
        LG_qsort_1a (A_0, n) ;
        return (0) ;
    }

    //--------------------------------------------------------------------------
    // determine # of tasks
    //--------------------------------------------------------------------------

    // determine the number of levels to create, which must always be an
    // even number.  The # of levels is chosen to ensure that the # of leaves
    // of the task tree is between 4*nthreads and 16*nthreads.

    //  2 to 4 threads:     4 levels, 16 qsort leaves
    //  5 to 16 threads:    6 levels, 64 qsort leaves
    // 17 to 64 threads:    8 levels, 256 qsort leaves
    // 65 to 256 threads:   10 levels, 1024 qsort leaves
    // 256 to 1024 threads: 12 levels, 4096 qsort leaves
    // ...

    int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ;
    int ntasks = 1 << k ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // One allocation carved into: W_0 (n scratch entries for the ping-pong
    // merges) plus the 5 per-task arrays and the ntasks+1 Slice offsets.
    W = LAGraph_Malloc (n + 6*ntasks + 1, sizeof (int64_t)) ;
    LG_CHECK (W == NULL, -1, "out of memory") ;

    int64_t *T = W ;
    int64_t *LG_RESTRICT W_0    = T ; T += n ;
    int64_t *LG_RESTRICT L_task = T ; T += ntasks ;
    int64_t *LG_RESTRICT L_len  = T ; T += ntasks ;
    int64_t *LG_RESTRICT R_task = T ; T += ntasks ;
    int64_t *LG_RESTRICT R_len  = T ; T += ntasks ;
    int64_t *LG_RESTRICT S_task = T ; T += ntasks ;
    int64_t *LG_RESTRICT Slice  = T ; T += (ntasks+1) ;

    //--------------------------------------------------------------------------
    // partition and sort the leaves
    //--------------------------------------------------------------------------

    LG_eslice (Slice, n, ntasks) ;
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t leaf = Slice [tid] ;
        int64_t leafsize = Slice [tid+1] - leaf ;
        LG_qsort_1a (A_0 + leaf, leafsize) ;
    }

    //--------------------------------------------------------------------------
    // merge each level
    //--------------------------------------------------------------------------

    // Each loop iteration merges two levels of the task tree: A -> W, then
    // W -> A, so k decreases by 2 and the final sorted result lands in A.
    int nt = 1 ;
    for ( ; k >= 2 ; k -= 2)
    {

        //----------------------------------------------------------------------
        // merge level k into level k-1, from A into W
        //----------------------------------------------------------------------

        // TODO: skip k and k-1 for each group of 4 sublists of A if they are
        // already sorted with respect to each other.

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two A sublists into one W sublist
            LG_msort_1b_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                A_0, Slice [tid],    Slice [tid+nt],
                A_0, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;
            LG_msort_1b_merge (
                W_0 + pS,
                A_0 + pL, nL,
                A_0 + pR, nR) ;
        }
        nt = 2*nt ;

        //----------------------------------------------------------------------
        // merge level k-1 into level k-2, from W into A
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two W sublists into one A sublist
            LG_msort_1b_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                W_0, Slice [tid],    Slice [tid+nt],
                W_0, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;
            LG_msort_1b_merge (
                A_0 + pS,
                W_0 + pL, nL,
                W_0 + pR, nR) ;
        }
        nt = 2*nt ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    LAGraph_FREE_ALL ;
    return (0) ;
}
atomic_read_codegen.c
// Clang codegen regression test: verifies the LLVM IR emitted for every form
// of "#pragma omp atomic read" (scalar, complex, vector-element, bit-field,
// global-register operands; monotonic/acquire/seq_cst orderings).
// NOTE(review): the "// CHECK" / "// RUN" comments are FileCheck directives and
// are part of the test's behavior — they must not be edited or reordered.
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER

// Global "v = x" pairs: each atomic read loads from *x and stores into *v.
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;

typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;

// Bit-field structs (plain and packed) exercise the partial-word atomic
// load + shift/mask extraction paths.
struct BitFields {
  int : 32;
  int a : 31;
} bfx;

struct BitFields_packed {
  int : 32;
  int a : 31;
} __attribute__ ((__packed__)) bfx_packed;

struct BitFields2 {
  int : 31;
  int a : 1;
} bfx2;

struct BitFields2_packed {
  int : 31;
  int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;

struct BitFields3 {
  int : 11;
  int a : 14;
} bfx3;

struct BitFields3_packed {
  int : 11;
  int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;

struct BitFields4 {
  short : 16;
  int a: 1;
  long b : 7;
} bfx4;

struct BitFields4_packed {
  short : 16;
  int a: 1;
  long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;

typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;

// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");

// CHECK-LABEL: @main(
int main(void) {
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
  bv = bx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
  cv = cx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
  ucv = ucx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store i16
#pragma omp atomic read
  sv = sx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store i16
#pragma omp atomic read
  usv = usx;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
  iv = ix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
  uiv = uix;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
  lv = lx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
  ulv = ulx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
  llv = llx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
  ullv = ullx;
// CHECK: load atomic i32, i32* bitcast (float* {{.*}} monotonic, align 4
// CHECK: bitcast i32 {{.*}} to float
// CHECK: store float
#pragma omp atomic read
  fv = fx;
// CHECK: load atomic i64, i64* bitcast (double* {{.*}} monotonic, align 8
// CHECK: bitcast i64 {{.*}} to double
// CHECK: store double
#pragma omp atomic read
  dv = dx;
// CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: store i128 [[LD]], i128* [[BITCAST]]
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]]
// CHECK: store x86_fp80 [[LD]]
#pragma omp atomic read
  ldv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
  civ = cix;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
  cfv = cfx;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 16,
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
// CHECK: store double
#pragma omp atomic seq_cst read
  cdv = cdx;
// Mixed-type reads: load uses the source's width, store uses the target's.
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i8
#pragma omp atomic read
  bv = ulx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
  cv = bx;
// CHECK: load atomic i8, i8* {{.*}} seq_cst, align 1
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i8
#pragma omp atomic read seq_cst
  ucv = cx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i16
#pragma omp atomic read
  sv = ulx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i16
#pragma omp atomic read
  usv = lx;
// CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i32
#pragma omp atomic seq_cst, read
  iv = uix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
  uiv = ix;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store i64
#pragma omp atomic read
  lv = cix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i64
#pragma omp atomic read
  ulv = fx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
  llv = dx;
// CHECK: load atomic i128, i128* {{.*}} monotonic, align 16
// CHECK: store i64
#pragma omp atomic read
  ullv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store float
#pragma omp atomic read
  fv = cix;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store double
#pragma omp atomic read
  dv = sx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
  civ = bx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
  cfv = usx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store double
// CHECK: store double
#pragma omp atomic read
  cdv = llx;
// Vector element: the whole vector is loaded atomically, then the element
// is extracted non-atomically.
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic, align 16
// CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[I128VAL]], i128* [[I128PTR]]
// CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: extractelement <4 x i32> [[LD]]
// CHECK: store i8
#pragma omp atomic read
  bv = int4x[0];
// Bit-fields: atomic load of the containing word, then shift/ashr extraction.
// CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 noundef 4, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* noundef [[LDTEMP_VOID_PTR]], i32 noundef 0)
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: ashr i32 [[LD]], 31
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx2.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: ashr i8 [[LD]], 7
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx2_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7
// CHECK: ashr i32 [[SHL]], 18
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx3.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 noundef 3, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* noundef [[LDTEMP_VOID_PTR]], i32 noundef 0)
// CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
// CHECK: sext i24 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx3_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63
// CHECK: trunc i64 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx4.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7
// CHECK: sext i8 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic relaxed read
  ldv = bfx4_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57
// CHECK: store x86_fp80
#pragma omp atomic read relaxed
  ldv = bfx4.b;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) acquire, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
// CHECK: sext i8 [[ASHR]] to i64
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store x86_fp80
#pragma omp atomic read acquire
  ldv = bfx4_packed.b;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic, align 8
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[LD]], i64* [[BITCAST]]
// CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: extractelement <2 x float> [[LD]]
// CHECK: store i64
#pragma omp atomic read
  ulv = float2x.x;
// CHECK: call{{.*}} i{{[0-9]+}} @llvm.read_register
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
#pragma omp atomic read seq_cst
  dv = rix;
  return 0;
}
#endif
GB_unop__one_bool_bool.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__one_bool_bool
// op(A') function:  GB_unop_tran__one_bool_bool

// C type:   bool
// A type:   bool
// cast:     ;
// unaryop:  cij = true

// Note: the ONE operator ignores aij entirely (cij is always true), which is
// why GB_GETA and GB_CAST expand to empty statements below.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    ;

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = true ;

// casting
#define GB_CAST(z, aij) \
    ; ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    ; ; \
    /* Cx [pC] = op (cast (aij)) */ \
    ; ; \
    Cx [pC] = true ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ONE || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the ONE operator to anz entries: Cx [p] = true for every entry
// present in A.  If Ab is non-NULL, A is bitmap and entries with Ab [p] == 0
// are skipped.  Returns GrB_NO_VALUE when this kernel is compiled out.

GrB_Info GB_unop_apply__one_bool_bool
(
    bool *Cx,                       // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // GB_GETA and GB_CAST expand to no-ops for the ONE operator
            ; ; ;
            ;
            Cx [p] = true ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            ; ; ;
            ;
            Cx [p] = true ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel is defined by the shared template in
// GB_unop_transpose.c, specialized via the macros above.

GrB_Info GB_unop_tran__one_bool_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_fp32_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp32_int32
// op(A') function:  GB_tran__identity_fp32_int32

// C type:   float
// A type:   int32_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts anz int32_t entries of Ax to float in Cx, in parallel.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.

GrB_Info GB_unop__identity_fp32_int32
(
    float *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body comes from the shared template
// GB_unaryop_transpose.c, specialized via the macros above.

GrB_Info GB_tran__identity_fp32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cp-tree.h
/* Definitions for C++ parsing and type checking. Copyright (C) 1987-2020 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_CP_TREE_H #define GCC_CP_TREE_H #include "tm.h" #include "hard-reg-set.h" #include "function.h" /* In order for the format checking to accept the C++ front end diagnostic framework extensions, you must include this file before diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE in c-common.h. */ #undef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cxxdiag__ #if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H) #error \ In order for the format checking to accept the C++ front end diagnostic \ framework extensions, you must include this file before diagnostic-core.h and \ c-common.h, not after. #endif #include "c-family/c-common.h" #include "diagnostic.h" /* A tree node, together with a location, so that we can track locations (and ranges) during parsing. The location is redundant for node kinds that have locations, but not all node kinds do (e.g. constants, and references to params, locals, etc), so we stash a copy here. 
*/ extern location_t cp_expr_location (const_tree); class cp_expr { public: cp_expr () : m_value (NULL), m_loc (UNKNOWN_LOCATION) {} cp_expr (tree value) : m_value (value), m_loc (cp_expr_location (m_value)) {} cp_expr (tree value, location_t loc): m_value (value), m_loc (loc) { protected_set_expr_location (value, loc); } /* Implicit conversions to tree. */ operator tree () const { return m_value; } tree & operator* () { return m_value; } tree operator* () const { return m_value; } tree & operator-> () { return m_value; } tree operator-> () const { return m_value; } tree get_value () const { return m_value; } location_t get_location () const { return m_loc; } location_t get_start () const { source_range src_range = get_range_from_loc (line_table, m_loc); return src_range.m_start; } location_t get_finish () const { source_range src_range = get_range_from_loc (line_table, m_loc); return src_range.m_finish; } void set_location (location_t loc) { protected_set_expr_location (m_value, loc); m_loc = loc; } void set_range (location_t start, location_t finish) { set_location (make_location (m_loc, start, finish)); } cp_expr& maybe_add_location_wrapper () { m_value = maybe_wrap_with_location (m_value, m_loc); return *this; } private: tree m_value; location_t m_loc; }; inline bool operator == (const cp_expr &lhs, tree rhs) { return lhs.get_value () == rhs; } enum cp_tree_index { CPTI_WCHAR_DECL, CPTI_VTABLE_ENTRY_TYPE, CPTI_DELTA_TYPE, CPTI_VTABLE_INDEX_TYPE, CPTI_CLEANUP_TYPE, CPTI_VTT_PARM_TYPE, CPTI_CLASS_TYPE, CPTI_UNKNOWN_TYPE, CPTI_INIT_LIST_TYPE, CPTI_VTBL_TYPE, CPTI_VTBL_PTR_TYPE, CPTI_STD, CPTI_ABI, CPTI_GLOBAL, CPTI_GLOBAL_TYPE, CPTI_CONST_TYPE_INFO_TYPE, CPTI_TYPE_INFO_PTR_TYPE, CPTI_ABORT_FNDECL, CPTI_AGGR_TAG, CPTI_CONV_OP_MARKER, CPTI_CTOR_IDENTIFIER, CPTI_COMPLETE_CTOR_IDENTIFIER, CPTI_BASE_CTOR_IDENTIFIER, CPTI_DTOR_IDENTIFIER, CPTI_COMPLETE_DTOR_IDENTIFIER, CPTI_BASE_DTOR_IDENTIFIER, CPTI_DELETING_DTOR_IDENTIFIER, CPTI_CONV_OP_IDENTIFIER, 
CPTI_DELTA_IDENTIFIER, CPTI_IN_CHARGE_IDENTIFIER, CPTI_VTT_PARM_IDENTIFIER, CPTI_THIS_IDENTIFIER, CPTI_PFN_IDENTIFIER, CPTI_VPTR_IDENTIFIER, CPTI_GLOBAL_IDENTIFIER, CPTI_ANON_IDENTIFIER, CPTI_AUTO_IDENTIFIER, CPTI_DECLTYPE_AUTO_IDENTIFIER, CPTI_INIT_LIST_IDENTIFIER, CPTI_FOR_RANGE__IDENTIFIER, CPTI_FOR_BEGIN__IDENTIFIER, CPTI_FOR_END__IDENTIFIER, CPTI_FOR_RANGE_IDENTIFIER, CPTI_FOR_BEGIN_IDENTIFIER, CPTI_FOR_END_IDENTIFIER, CPTI_ABI_TAG_IDENTIFIER, CPTI_ALIGNED_IDENTIFIER, CPTI_BEGIN_IDENTIFIER, CPTI_END_IDENTIFIER, CPTI_GET_IDENTIFIER, CPTI_GNU_IDENTIFIER, CPTI_TUPLE_ELEMENT_IDENTIFIER, CPTI_TUPLE_SIZE_IDENTIFIER, CPTI_TYPE_IDENTIFIER, CPTI_VALUE_IDENTIFIER, CPTI_FUN_IDENTIFIER, CPTI_CLOSURE_IDENTIFIER, CPTI_HEAP_UNINIT_IDENTIFIER, CPTI_HEAP_IDENTIFIER, CPTI_HEAP_DELETED_IDENTIFIER, CPTI_LANG_NAME_C, CPTI_LANG_NAME_CPLUSPLUS, CPTI_EMPTY_EXCEPT_SPEC, CPTI_NOEXCEPT_TRUE_SPEC, CPTI_NOEXCEPT_FALSE_SPEC, CPTI_NOEXCEPT_DEFERRED_SPEC, CPTI_TERMINATE_FN, CPTI_CALL_UNEXPECTED_FN, CPTI_GET_EXCEPTION_PTR_FN, CPTI_BEGIN_CATCH_FN, CPTI_END_CATCH_FN, CPTI_ALLOCATE_EXCEPTION_FN, CPTI_FREE_EXCEPTION_FN, CPTI_THROW_FN, CPTI_RETHROW_FN, CPTI_ATEXIT_FN_PTR_TYPE, CPTI_ATEXIT, CPTI_DSO_HANDLE, CPTI_DCAST, CPTI_NULLPTR, CPTI_NULLPTR_TYPE, CPTI_ALIGN_TYPE, CPTI_ANY_TARG, CPTI_SOURCE_LOCATION_IMPL, CPTI_FALLBACK_DFLOAT32_TYPE, CPTI_FALLBACK_DFLOAT64_TYPE, CPTI_FALLBACK_DFLOAT128_TYPE, CPTI_MAX }; extern GTY(()) tree cp_global_trees[CPTI_MAX]; #define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL] #define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE] /* The type used to represent an offset by which to adjust the `this' pointer in pointer-to-member types. */ #define delta_type_node cp_global_trees[CPTI_DELTA_TYPE] /* The type used to represent an index into the vtable. 
*/ #define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE] #define class_type_node cp_global_trees[CPTI_CLASS_TYPE] #define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE] #define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE] #define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE] #define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE] #define std_node cp_global_trees[CPTI_STD] #define abi_node cp_global_trees[CPTI_ABI] #define global_namespace cp_global_trees[CPTI_GLOBAL] #define global_type_node cp_global_trees[CPTI_GLOBAL_TYPE] #define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE] #define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE] #define conv_op_marker cp_global_trees[CPTI_CONV_OP_MARKER] #define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL] #define current_aggr cp_global_trees[CPTI_AGGR_TAG] #define nullptr_node cp_global_trees[CPTI_NULLPTR] #define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE] /* std::align_val_t */ #define align_type_node cp_global_trees[CPTI_ALIGN_TYPE] /* We cache these tree nodes so as to call get_identifier less frequently. For identifiers for functions, including special member functions such as ctors and assignment operators, the nodes can be used (among other things) to iterate over their overloads defined by/for a type. For example: tree ovlid = assign_op_identifier; tree overloads = get_class_binding (type, ovlid); for (ovl_iterator it (overloads); it; ++it) { ... } iterates over the set of implicitly and explicitly defined overloads of the assignment operator for type (including the copy and move assignment operators, whether deleted or not). */ /* The name of a constructor that takes an in-charge parameter to decide whether or not to construct virtual base classes. */ #define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER] /* The name of a constructor that constructs virtual base classes. 
*/ #define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER] /* The name of a constructor that does not construct virtual base classes. */ #define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER] /* The name of a destructor that takes an in-charge parameter to decide whether or not to destroy virtual base classes and whether or not to delete the object. */ #define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes. */ #define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER] /* The name of a destructor that does not destroy virtual base classes. */ #define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes, and then deletes the entire object. */ #define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER] /* The name used for conversion operators -- but note that actual conversion functions use special identifiers outside the identifier table. */ #define conv_op_identifier cp_global_trees[CPTI_CONV_OP_IDENTIFIER] #define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER] #define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER] /* The name of the parameter that contains a pointer to the VTT to use for this subobject constructor or destructor. */ #define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER] #define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER] #define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER] #define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER] /* The name of the ::, std & anon namespaces. */ #define global_identifier cp_global_trees[CPTI_GLOBAL_IDENTIFIER] #define anon_identifier cp_global_trees[CPTI_ANON_IDENTIFIER] /* auto and declspec(auto) identifiers. 
*/ #define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER] #define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER] #define init_list_identifier cp_global_trees[CPTI_INIT_LIST_IDENTIFIER] #define for_range__identifier cp_global_trees[CPTI_FOR_RANGE__IDENTIFIER] #define for_begin__identifier cp_global_trees[CPTI_FOR_BEGIN__IDENTIFIER] #define for_end__identifier cp_global_trees[CPTI_FOR_END__IDENTIFIER] #define for_range_identifier cp_global_trees[CPTI_FOR_RANGE_IDENTIFIER] #define for_begin_identifier cp_global_trees[CPTI_FOR_BEGIN_IDENTIFIER] #define for_end_identifier cp_global_trees[CPTI_FOR_END_IDENTIFIER] #define abi_tag_identifier cp_global_trees[CPTI_ABI_TAG_IDENTIFIER] #define aligned_identifier cp_global_trees[CPTI_ALIGNED_IDENTIFIER] #define begin_identifier cp_global_trees[CPTI_BEGIN_IDENTIFIER] #define end_identifier cp_global_trees[CPTI_END_IDENTIFIER] #define get__identifier cp_global_trees[CPTI_GET_IDENTIFIER] #define gnu_identifier cp_global_trees[CPTI_GNU_IDENTIFIER] #define tuple_element_identifier cp_global_trees[CPTI_TUPLE_ELEMENT_IDENTIFIER] #define tuple_size_identifier cp_global_trees[CPTI_TUPLE_SIZE_IDENTIFIER] #define type_identifier cp_global_trees[CPTI_TYPE_IDENTIFIER] #define value_identifier cp_global_trees[CPTI_VALUE_IDENTIFIER] #define fun_identifier cp_global_trees[CPTI_FUN_IDENTIFIER] #define closure_identifier cp_global_trees[CPTI_CLOSURE_IDENTIFIER] #define heap_uninit_identifier cp_global_trees[CPTI_HEAP_UNINIT_IDENTIFIER] #define heap_identifier cp_global_trees[CPTI_HEAP_IDENTIFIER] #define heap_deleted_identifier cp_global_trees[CPTI_HEAP_DELETED_IDENTIFIER] #define lang_name_c cp_global_trees[CPTI_LANG_NAME_C] #define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS] /* Exception specifiers used for throw(), noexcept(true), noexcept(false) and deferred noexcept. We rely on these being uncloned. 
*/ #define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC] #define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC] #define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC] #define noexcept_deferred_spec cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC] /* Exception handling function declarations. */ #define terminate_fn cp_global_trees[CPTI_TERMINATE_FN] #define call_unexpected_fn cp_global_trees[CPTI_CALL_UNEXPECTED_FN] #define get_exception_ptr_fn cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN] #define begin_catch_fn cp_global_trees[CPTI_BEGIN_CATCH_FN] #define end_catch_fn cp_global_trees[CPTI_END_CATCH_FN] #define allocate_exception_fn cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN] #define free_exception_fn cp_global_trees[CPTI_FREE_EXCEPTION_FN] #define throw_fn cp_global_trees[CPTI_THROW_FN] #define rethrow_fn cp_global_trees[CPTI_RETHROW_FN] /* The type of the function-pointer argument to "__cxa_atexit" (or "std::atexit", if "__cxa_atexit" is not being used). */ #define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE] /* A pointer to `std::atexit'. */ #define atexit_node cp_global_trees[CPTI_ATEXIT] /* A pointer to `__dso_handle'. */ #define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE] /* The declaration of the dynamic_cast runtime. */ #define dynamic_cast_node cp_global_trees[CPTI_DCAST] /* The type of a destructor. */ #define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE] /* The type of the vtt parameter passed to subobject constructors and destructors. */ #define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE] /* A node which matches any template argument. */ #define any_targ_node cp_global_trees[CPTI_ANY_TARG] /* std::source_location::__impl class. */ #define source_location_impl cp_global_trees[CPTI_SOURCE_LOCATION_IMPL] /* Node to indicate default access. This must be distinct from the access nodes in tree.h. 
*/ #define access_default_node null_node /* Variant of dfloat{32,64,128}_type_node only used for fundamental rtti purposes if DFP is disabled. */ #define fallback_dfloat32_type cp_global_trees[CPTI_FALLBACK_DFLOAT32_TYPE] #define fallback_dfloat64_type cp_global_trees[CPTI_FALLBACK_DFLOAT64_TYPE] #define fallback_dfloat128_type cp_global_trees[CPTI_FALLBACK_DFLOAT128_TYPE] #include "name-lookup.h" /* Usage of TREE_LANG_FLAG_?: 0: IDENTIFIER_KIND_BIT_0 (in IDENTIFIER_NODE) NEW_EXPR_USE_GLOBAL (in NEW_EXPR). COND_EXPR_IS_VEC_DELETE (in COND_EXPR). DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR). COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR). CLEANUP_P (in TRY_BLOCK) AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR) PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF) PAREN_STRING_LITERAL (in STRING_CST) CP_DECL_THREAD_LOCAL_P (in VAR_DECL) KOENIG_LOOKUP_P (in CALL_EXPR) STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST). EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT) STMT_EXPR_NO_SCOPE (in STMT_EXPR) BIND_EXPR_TRY_BLOCK (in BIND_EXPR) TYPENAME_IS_ENUM_P (in TYPENAME_TYPE) OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE, and OMP_TASKLOOP) BASELINK_QUALIFIED_P (in BASELINK) TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR) TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX) ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute) ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag) LAMBDA_CAPTURE_EXPLICIT_P (in a TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST) CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR) LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR) DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE) VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR) DECL_OVERRIDE_P (in FUNCTION_DECL) IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR) TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR) CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR) PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION) TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO) SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR) COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ) WILDCARD_PACK_P (in 
WILDCARD_DECL)
      BLOCK_OUTER_CURLY_BRACE_P (in BLOCK)
      FOLD_EXPR_MODOP_P (*_FOLD_EXPR)
      IF_STMT_CONSTEXPR_P (IF_STMT)
      TEMPLATE_TYPE_PARM_FOR_CLASS (TEMPLATE_TYPE_PARM)
      DECL_NAMESPACE_INLINE_P (in NAMESPACE_DECL)
      SWITCH_STMT_ALL_CASES_P (in SWITCH_STMT)
      REINTERPRET_CAST_P (in NOP_EXPR)
      ALIGNOF_EXPR_STD_P (in ALIGNOF_EXPR)
      OVL_DEDUP_P (in OVERLOAD)
   1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE)
      TI_PENDING_TEMPLATE_FLAG.
      TEMPLATE_PARMS_FOR_INLINE.
      DELETE_EXPR_USE_VEC (in DELETE_EXPR).
      (TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
      ICS_ELLIPSIS_FLAG (in _CONV)
      DECL_INITIALIZED_P (in VAR_DECL)
      TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
      STMT_IS_FULL_EXPR_P (in _STMT)
      TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
      LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
      DECL_FINAL_P (in FUNCTION_DECL)
      QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
      CONSTRUCTOR_IS_DEPENDENT (in CONSTRUCTOR)
      TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO)
      PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION)
      OVL_USING_P (in OVERLOAD)
      IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR)
   2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE)
      ICS_THIS_FLAG (in _CONV)
      DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
      STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
      TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
      TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
      FNDECL_USED_AUTO (in FUNCTION_DECL)
      DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
      REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF,
			   VIEW_CONVERT_EXPR)
      AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
      CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR)
      OVL_HIDDEN_P (in OVERLOAD)
      SWITCH_STMT_NO_BREAK_P (in SWITCH_STMT)
      LAMBDA_EXPR_CAPTURE_OPTIMIZED (in LAMBDA_EXPR)
      IMPLICIT_CONV_EXPR_BRACED_INIT (in IMPLICIT_CONV_EXPR)
      TINFO_VAR_DECLARED_CONSTINIT (in TEMPLATE_INFO)
      CALL_FROM_NEW_OR_DELETE_P (in CALL_EXPR)
   3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV) FN_TRY_BLOCK_P (in TRY_BLOCK) BIND_EXPR_BODY_BLOCK (in BIND_EXPR) CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR) DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE) CONSTRUCTOR_C99_COMPOUND_LITERAL (in CONSTRUCTOR) OVL_NESTED_P (in OVERLOAD) LAMBDA_EXPR_INSTANTIATED (in LAMBDA_EXPR) Reserved for DECL_MODULE_EXPORT (in DECL_) 4: IDENTIFIER_MARKED (IDENTIFIER_NODEs) TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR, CALL_EXPR, or FIELD_DECL). DECL_TINFO_P (in VAR_DECL) FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) OVL_LOOKUP_P (in OVERLOAD) LOOKUP_FOUND_P (in RECORD_TYPE, UNION_TYPE, NAMESPACE_DECL) 5: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE) FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR) CONSTRUCTOR_PLACEHOLDER_BOUNDARY (in CONSTRUCTOR) 6: TYPE_MARKED_P (in _TYPE) DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL) RANGE_FOR_IVDEP (in RANGE_FOR_STMT) CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR) CONSTRUCTOR_IS_DESIGNATED_INIT (in CONSTRUCTOR) Usage of TYPE_LANG_FLAG_?: 0: TYPE_DEPENDENT_P 1: TYPE_HAS_USER_CONSTRUCTOR. 2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE) TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE) 4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR 5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE) ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE) AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM) 6: TYPE_DEPENDENT_P_VALID Usage of DECL_LANG_FLAG_?: 0: DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL) DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL) DECL_MUTABLE_P (in FIELD_DECL) DECL_DEPENDENT_P (in USING_DECL) LABEL_DECL_BREAK (in LABEL_DECL) 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). 
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) USING_DECL_TYPENAME_P (in USING_DECL) DECL_VLA_CAPTURE_P (in FIELD_DECL) DECL_ARRAY_PARAMETER_P (in PARM_DECL) LABEL_DECL_CONTINUE (in LABEL_DECL) 2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL). DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL) DECL_CONSTRAINT_VAR_P (in a PARM_DECL) TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL) DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL) LABEL_DECL_CDTOR (in LABEL_DECL) 3: DECL_IN_AGGR_P. 4: DECL_C_BIT_FIELD (in a FIELD_DECL) DECL_ANON_UNION_VAR_P (in a VAR_DECL) DECL_SELF_REFERENCE_P (in a TYPE_DECL) DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL) 5: DECL_INTERFACE_KNOWN. 6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL). DECL_FIELD_IS_BASE (in FIELD_DECL) TYPE_DECL_ALIAS_P (in TYPE_DECL) 7: DECL_THUNK_P (in a member FUNCTION_DECL) DECL_NORMAL_CAPTURE_P (in FIELD_DECL) 8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL) Usage of language-independent fields in a language-dependent manner: TYPE_ALIAS_SET This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so forth as a substitute for the mark bits provided in `lang_type'. At present, only the six low-order bits are used. TYPE_LANG_SLOT_1 For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS. For a POINTER_TYPE (to a METHOD_TYPE), this is TYPE_PTRMEMFUNC_TYPE. For an ENUMERAL_TYPE, BOUND_TEMPLATE_TEMPLATE_PARM_TYPE, RECORD_TYPE or UNION_TYPE this is TYPE_TEMPLATE_INFO, BINFO_VIRTUALS For a binfo, this is a TREE_LIST. There is an entry for each virtual function declared either in BINFO or its direct and indirect primary bases. The BV_DELTA of each node gives the amount by which to adjust the `this' pointer when calling the function. If the method is an overridden version of a base class method, then it is assumed that, prior to adjustment, the this pointer points to an object of the base class. 
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable index of the vcall offset for this entry. The BV_FN is the declaration for the virtual function itself. If BV_LOST_PRIMARY is set, it means that this entry is for a lost primary virtual base and can be left null in the vtable. BINFO_VTABLE This is an expression with POINTER_TYPE that gives the value to which the vptr should be initialized. Use get_vtbl_decl_for_binfo to extract the VAR_DECL for the complete vtable. DECL_VINDEX This field is NULL for a non-virtual function. For a virtual function, it is eventually set to an INTEGER_CST indicating the index in the vtable at which this function can be found. When a virtual function is declared, but before it is known what function is overridden, this field is the error_mark_node. Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is the virtual function this one overrides, and whose TREE_CHAIN is the old DECL_VINDEX. */ /* Language-specific tree checkers. */ #define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL) #define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL) #define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \ || TREE_CODE (NODE) == FUNCTION_DECL) #define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL) #define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \ TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) /* Returns t iff the node can have a TEMPLATE_INFO field. 
 */

/* Checked accessor for the TEMPLATE_INFO field: only the DECL codes
   listed in the switch may carry one; anything else is an internal
   error reported via tree_check_failed.  F, L, FN are the source
   file/line/function of the access, for diagnostics.  */
inline tree
template_info_decl_check (const_tree t, const char* f, int l, const char* fn)
{
  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case FUNCTION_DECL:
    case FIELD_DECL:
    case TYPE_DECL:
    case CONCEPT_DECL:
    case TEMPLATE_DECL:
      return const_cast<tree>(t);
    default:
      break;
    }
  /* Not a TEMPLATE_INFO-bearing decl: report a checking failure.  */
  tree_check_failed (t, f, l, fn,
		     VAR_DECL, FUNCTION_DECL, FIELD_DECL, TYPE_DECL,
		     CONCEPT_DECL, TEMPLATE_DECL, 0);
  gcc_unreachable ();
}

#define TEMPLATE_INFO_DECL_CHECK(NODE) \
  template_info_decl_check ((NODE), __FILE__, __LINE__, __FUNCTION__)

/* Checked access to a thunk: NODE must be a FUNCTION_DECL whose
   lang-specific data has thunk_p set.  */
#define THUNK_FUNCTION_CHECK(NODE) __extension__			\
({  __typeof (NODE) const __t = (NODE);					\
    if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
	|| !__t->decl_common.lang_specific->u.fn.thunk_p)		\
      tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0);	\
     __t; })

#else /* ENABLE_TREE_CHECKING */

/* With tree checking disabled, the checkers are free pass-throughs.  */
#define TEMPLATE_INFO_DECL_CHECK(NODE) (NODE)
#define THUNK_FUNCTION_CHECK(NODE) (NODE)

#endif /* ENABLE_TREE_CHECKING */

/* Language-dependent contents of an identifier.  */

struct GTY(()) lang_identifier {
  struct c_common_identifier c_common;
  cxx_binding *bindings;
};

/* Return a typed pointer version of T if it designates a C++
   front-end identifier, otherwise NULL.  */
inline lang_identifier*
identifier_p (tree t)
{
  if (TREE_CODE (t) == IDENTIFIER_NODE)
    return (lang_identifier*) t;
  return NULL;
}

#define LANG_IDENTIFIER_CAST(NODE) \
	((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))

/* The payload of a TEMPLATE_PARM_INDEX node.  */
struct GTY(()) template_parm_index {
  struct tree_common common;
  int index;
  int level;
  int orig_level;
  tree decl;
};

/* The payload of a PTRMEM_CST (pointer-to-member constant) node.  */
struct GTY(()) ptrmem_cst {
  struct tree_common common;
  tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;

#define CLEANUP_P(NODE)		TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))

#define BIND_EXPR_TRY_BLOCK(NODE) \
  TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))

/* Used to mark the block around the member initializers and cleanups.
*/ #define BIND_EXPR_BODY_BLOCK(NODE) \ TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE)) #define FUNCTION_NEEDS_BODY_BLOCK(NODE) \ (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \ || LAMBDA_FUNCTION_P (NODE)) #define STATEMENT_LIST_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE)) #define STATEMENT_LIST_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE)) /* Mark the outer curly brace BLOCK. */ #define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE)) /* Nonzero if this statement should be considered a full-expression, i.e., if temporaries created during this statement should have their destructors run at the end of this statement. */ #define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE)) /* Marks the result of a statement expression. */ #define EXPR_STMT_STMT_EXPR_RESULT(NODE) \ TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE)) /* Nonzero if this statement-expression does not have an associated scope. */ #define STMT_EXPR_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE)) #define COND_EXPR_IS_VEC_DELETE(NODE) \ TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE)) /* Nonzero if this NOP_EXPR is a reinterpret_cast. Such conversions are not constexprs. Other NOP_EXPRs are. */ #define REINTERPRET_CAST_P(NODE) \ TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE)) /* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual sense of `same'. */ #define same_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_STRICT) /* Returns nonzero iff NODE is a declaration for the global function `main'. */ #define DECL_MAIN_P(NODE) \ (DECL_EXTERN_C_FUNCTION_P (NODE) \ && DECL_NAME (NODE) != NULL_TREE \ && MAIN_NAME_P (DECL_NAME (NODE)) \ && flag_hosted) /* Lookup walker marking. */ #define LOOKUP_SEEN_P(NODE) TREE_VISITED(NODE) #define LOOKUP_FOUND_P(NODE) \ TREE_LANG_FLAG_4 (TREE_CHECK3(NODE,RECORD_TYPE,UNION_TYPE,NAMESPACE_DECL)) /* These two accessors should only be used by OVL manipulators. 
   Other users should use iterators and convenience functions.  */
#define OVL_FUNCTION(NODE) \
  (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) \
  (((struct tree_overload*)OVERLOAD_CHECK (NODE))->common.chain)

/* If set, this or a subsequent overload contains decls that need deduping.  */
#define OVL_DEDUP_P(NODE)	TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE))
/* If set, this was imported in a using declaration.   */
#define OVL_USING_P(NODE)	TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE))
/* If set, this overload is a hidden decl.  */
#define OVL_HIDDEN_P(NODE)	TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE))
/* If set, this overload contains a nested overload.  */
#define OVL_NESTED_P(NODE)	TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE))
/* If set, this overload was constructed during lookup.  */
#define OVL_LOOKUP_P(NODE)	TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE))

/* The first decl of an overload.  */
#define OVL_FIRST(NODE)	ovl_first (NODE)
/* The name of the overload set.  */
#define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE))

/* Whether this is a set of overloaded functions.  TEMPLATE_DECLS are
   always wrapped in an OVERLOAD, so we don't need to check them here.  */
#define OVL_P(NODE) \
  (TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD)
/* Whether this is a single member overload.  */
#define OVL_SINGLE_P(NODE) \
  (TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE))

/* OVL_HIDDEN_P nodes come before other nodes.  */

struct GTY(()) tree_overload {
  struct tree_common common;
  tree function;
};

/* Iterator for a 1 dimensional overload.  Permits iterating over the
   outer level of a 2-d overload when explicitly enabled.  */

class ovl_iterator {
  tree ovl;		   /* Current position: an OVERLOAD node or a
			      single decl terminating the chain.  */
  const bool allow_inner;  /* Only used when checking.  */

 public:
  explicit ovl_iterator (tree o, bool allow = false)
    : ovl (o), allow_inner (allow)
  {
  }

 private:
  /* Do not duplicate.  */
  ovl_iterator &operator= (const ovl_iterator &);
  ovl_iterator (const ovl_iterator &);

 public:
  /* True while there are members left to visit.  */
  operator bool () const
  {
    return ovl;
  }
  /* Advance: a non-OVERLOAD node is the last member, so go to NULL.  */
  ovl_iterator &operator++ ()
  {
    ovl = TREE_CODE (ovl) != OVERLOAD ? NULL_TREE : OVL_CHAIN (ovl);
    return *this;
  }
  /* The decl at the current position.  */
  tree operator* () const
  {
    tree fn = TREE_CODE (ovl) != OVERLOAD ? ovl : OVL_FUNCTION (ovl);

    /* Check this is not an unexpected 2-dimensional overload.  */
    gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD);

    return fn;
  }

 public:
  /* Whether this overload was introduced by a using decl.  */
  bool using_p () const
  {
    return (TREE_CODE (ovl) == USING_DECL
	    || (TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl)));
  }
  /* Whether the current node is a hidden decl.  */
  bool hidden_p () const
  {
    return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl);
  }

 public:
  tree remove_node (tree head)
  {
    return remove_node (head, ovl);
  }
  tree reveal_node (tree head)
  {
    return reveal_node (head, ovl);
  }

 protected:
  /* If we have a nested overload, point at the inner overload and
     return the next link on the outer one.  */
  tree maybe_push ()
  {
    tree r = NULL_TREE;

    if (ovl && TREE_CODE (ovl) == OVERLOAD && OVL_NESTED_P (ovl))
      {
	r = OVL_CHAIN (ovl);
	ovl = OVL_FUNCTION (ovl);
      }
    return r;
  }
  /* Restore an outer nested overload.  */
  void pop (tree outer)
  {
    gcc_checking_assert (!ovl);
    ovl = outer;
  }

 private:
  /* We make these static functions to avoid the address of the
     iterator escaping the local context.  */
  static tree remove_node (tree head, tree node);
  static tree reveal_node (tree ovl, tree node);
};

/* Iterator over a (potentially) 2 dimensional overload, which is
   produced by name lookup.  */

class lkp_iterator : public ovl_iterator {
  typedef ovl_iterator parent;

  tree outer;	/* Saved remainder of the outer chain while the parent
		   iterator walks an inner overload (see maybe_push).  */

 public:
  explicit lkp_iterator (tree o)
    : parent (o, true), outer (maybe_push ())
  {
  }

 public:
  /* Advance; when the inner overload is exhausted, resume the saved
     outer chain and descend into the next nested overload, if any.  */
  lkp_iterator &operator++ ()
  {
    bool repush = !outer;

    if (!parent::operator++ () && !repush)
      {
	pop (outer);
	repush = true;
      }

    if (repush)
      outer = maybe_push ();

    return *this;
  }
};

/* hash traits for declarations.
   Hashes potential overload sets via DECL_NAME.  */

struct named_decl_hash : ggc_remove <tree> {
  typedef tree value_type; /* A DECL or OVERLOAD  */
  typedef tree compare_type; /* An identifier.  */

  inline static hashval_t hash (const value_type decl);
  inline static bool equal (const value_type existing, compare_type candidate);

  /* Empty slots are NULL_TREE.  */
  static const bool empty_zero_p = true;
  static inline void mark_empty (value_type &p) {p = NULL_TREE;}
  static inline bool is_empty (value_type p) {return !p;}

  /* Nothing is deletable.  Everything is insertable.  */
  static bool is_deleted (value_type) { return false; }
  static void mark_deleted (value_type) { gcc_unreachable (); }
};

/* Simplified unique_ptr clone to release a tree vec on exit.  */

class releasing_vec
{
public:
  typedef vec<tree, va_gc> vec_t;

  releasing_vec (vec_t *v): v(v) { }
  releasing_vec (): v(make_tree_vector ()) { }

  /* Copy ops are deliberately declared but not defined,
     copies must always be elided.  */
  releasing_vec (const releasing_vec &);
  releasing_vec &operator= (const releasing_vec &);

  vec_t &operator* () const { return *v; }
  vec_t *operator-> () const { return v; }
  vec_t *get() const { return v; }
  operator vec_t *() const { return v; }
  /* Address of the underlying pointer, for APIs that may reallocate.  */
  vec_t ** operator& () { return &v; }

  /* Breaks pointer/value consistency for convenience.  */
  tree& operator[] (unsigned i) const { return (*v)[i]; }

  /* Hands the vector back to the tree-vector cache on scope exit.  */
  ~releasing_vec() { release_tree_vector (v); }
private:
  vec_t *v;
};
/* Forwarding functions for vec_safe_* that might reallocate.
 */

inline tree*
vec_safe_push (releasing_vec& r, const tree &t CXX_MEM_STAT_INFO)
{
  /* `*&r' exposes the underlying vec pointer so the callee can
     reallocate it in place.  */
  return vec_safe_push (*&r, t PASS_MEM_STAT);
}

inline bool
vec_safe_reserve (releasing_vec& r, unsigned n, bool e = false CXX_MEM_STAT_INFO)
{
  return vec_safe_reserve (*&r, n, e PASS_MEM_STAT);
}
inline unsigned
vec_safe_length (releasing_vec &r)
{
  return r->length();
}
inline void
vec_safe_splice (releasing_vec &r, vec<tree, va_gc> *p CXX_MEM_STAT_INFO)
{
  vec_safe_splice (*&r, p PASS_MEM_STAT);
}
/* Calling release_tree_vector on a releasing_vec would release twice;
   poison that overload.  */
void release_tree_vector (releasing_vec &); // cause link error

/* The payload of a TEMPLATE_DECL.  */

struct GTY(()) tree_template_decl {
  struct tree_decl_common common;
  tree arguments;
  tree result;
};

/* Returns true iff NODE is a BASELINK.  */
#define BASELINK_P(NODE) \
  (TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
   BASELINK_FUNCTIONS.  */
#define BASELINK_BINFO(NODE) \
  (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
   a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR.  */
#define BASELINK_FUNCTIONS(NODE) \
  (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* If T is a BASELINK, grab the functions, otherwise just T, which is
   expected to already be a (list of) functions.  */
#define MAYBE_BASELINK_FUNCTIONS(T) \
  (BASELINK_P (T) ? BASELINK_FUNCTIONS (T) : T)
/* The BINFO in which the search for the functions indicated by this baselink
   began.  This base is used to determine the accessibility of functions
   selected by overload resolution.  */
#define BASELINK_ACCESS_BINFO(NODE) \
  (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
   to which the conversion should occur.  This value is important if
   the BASELINK_FUNCTIONS include a template conversion operator --
   the BASELINK_OPTYPE can be used to determine what type the user
   requested.  */
#define BASELINK_OPTYPE(NODE) \
  (TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup.  */
#define BASELINK_QUALIFIED_P(NODE) \
  TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))

/* The payload of a BASELINK node (see the accessors above).  */
struct GTY(()) tree_baselink {
  struct tree_common common;
  tree binfo;
  tree functions;
  tree access_binfo;
};

/* The different kinds of ids that we encounter.  */

enum cp_id_kind
{
  /* Not an id at all.  */
  CP_ID_KIND_NONE,
  /* An unqualified-id that is not a template-id.  */
  CP_ID_KIND_UNQUALIFIED,
  /* An unqualified-id that is a dependent name.  */
  CP_ID_KIND_UNQUALIFIED_DEPENDENT,
  /* An unqualified template-id.  */
  CP_ID_KIND_TEMPLATE_ID,
  /* A qualified-id.  */
  CP_ID_KIND_QUALIFIED
};

/* The various kinds of C++0x warnings we encounter.  */

enum cpp0x_warn_str
{
  /* extended initializer lists */
  CPP0X_INITIALIZER_LISTS,
  /* explicit conversion operators */
  CPP0X_EXPLICIT_CONVERSION,
  /* variadic templates */
  CPP0X_VARIADIC_TEMPLATES,
  /* lambda expressions */
  CPP0X_LAMBDA_EXPR,
  /* C++0x auto */
  CPP0X_AUTO,
  /* scoped enums */
  CPP0X_SCOPED_ENUMS,
  /* defaulted and deleted functions */
  CPP0X_DEFAULTED_DELETED,
  /* inline namespaces */
  CPP0X_INLINE_NAMESPACES,
  /* override controls, override/final */
  CPP0X_OVERRIDE_CONTROLS,
  /* non-static data member initializers */
  CPP0X_NSDMI,
  /* user defined literals */
  CPP0X_USER_DEFINED_LITERALS,
  /* delegating constructors */
  CPP0X_DELEGATING_CTORS,
  /* inheriting constructors */
  CPP0X_INHERITING_CTORS,
  /* C++11 attributes */
  CPP0X_ATTRIBUTES,
  /* ref-qualified member functions */
  CPP0X_REF_QUALIFIER
};

/* The various kinds of operation used by composite_pointer_type.  */

enum composite_pointer_operation
{
  /* comparison */
  CPO_COMPARISON,
  /* conversion */
  CPO_CONVERSION,
  /* conditional expression */
  CPO_CONDITIONAL_EXPR
};

/* Possible cases of expression list used by
   build_x_compound_expr_from_list.
 */
enum expr_list_kind {
  ELK_INIT,		/* initializer */
  ELK_MEM_INIT,		/* member initializer */
  ELK_FUNC_CAST		/* functional cast */
};

/* Possible cases of implicit bad rhs conversions.  */
enum impl_conv_rhs {
  ICR_DEFAULT_ARGUMENT, /* default argument */
  ICR_CONVERTING,	/* converting */
  ICR_INIT,		/* initialization */
  ICR_ARGPASS,		/* argument passing */
  ICR_RETURN,		/* return */
  ICR_ASSIGN		/* assignment */
};

/* Possible cases of implicit or explicit bad conversions to void.  */
enum impl_conv_void {
  ICV_CAST,            /* (explicit) conversion to void */
  ICV_SECOND_OF_COND,  /* second operand of conditional expression */
  ICV_THIRD_OF_COND,   /* third operand of conditional expression */
  ICV_RIGHT_OF_COMMA,  /* right operand of comma operator */
  ICV_LEFT_OF_COMMA,   /* left operand of comma operator */
  ICV_STATEMENT,       /* statement */
  ICV_THIRD_IN_FOR     /* for increment expression */
};

/* Possible invalid uses of an abstract class that might not have a
   specific associated declaration.  */
enum GTY(()) abstract_class_use {
  ACU_UNKNOWN,			/* unknown or decl provided */
  ACU_CAST,			/* cast to abstract class */
  ACU_NEW,			/* new-expression of abstract class */
  ACU_THROW,			/* throw-expression of abstract class */
  ACU_CATCH,			/* catch-parameter of abstract class */
  ACU_ARRAY,			/* array of abstract class */
  ACU_RETURN,			/* return type of abstract class */
  ACU_PARM			/* parameter type of abstract class */
};

/* Macros for access to language-specific slots in an identifier.  */

/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
   identifier.  Its PREVIOUS is the next outermost binding.  Each
   VALUE field is a DECL for the associated declaration.  Thus,
   name lookup consists simply of pulling off the node at the front
   of the list (modulo oddities for looking up the names of types,
   and such.)  You can use SCOPE field to determine the scope
   that bound the name.  */
#define IDENTIFIER_BINDING(NODE) \
  (LANG_IDENTIFIER_CAST (NODE)->bindings)

/* TREE_TYPE only indicates on local and class scope the current
   type.
   For namespace scope, the presence of a type in any namespace is indicated
   with global_type_node, and the real type behind must be found through
   lookup.  */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)

/* Kinds of identifiers.  Values are carefully chosen: each kind's
   numeric value is encoded in IDENTIFIER_KIND_BIT_{0,1,2}, which the
   predicate macros below test directly.  */
enum cp_identifier_kind {
  cik_normal = 0,	/* Not a special identifier.  */
  cik_keyword = 1,	/* A keyword.  */
  cik_ctor = 2,		/* Constructor (in-chg, complete or base).  */
  cik_dtor = 3,		/* Destructor (in-chg, deleting, complete or
			   base).  */
  cik_simple_op = 4,	/* Non-assignment operator name.  */
  cik_assign_op = 5,	/* An assignment operator name.  */
  cik_conv_op = 6,	/* Conversion operator name.  */
  cik_reserved_for_udlit = 7,	/* Not yet in use  */
  cik_max
};

/* Kind bits.  */
#define IDENTIFIER_KIND_BIT_0(NODE) \
  TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE))
#define IDENTIFIER_KIND_BIT_1(NODE) \
  TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE))
#define IDENTIFIER_KIND_BIT_2(NODE) \
  TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE))

/* Used by various search routines.  */
#define IDENTIFIER_MARKED(NODE) \
  TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE))

/* Nonzero if this identifier is used as a virtual function name somewhere
   (optimizes searches).  */
#define IDENTIFIER_VIRTUAL_P(NODE) \
  TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE))

/* True if this identifier is a reserved word.  C_RID_CODE (node) is
   then the RID_* value of the keyword.  Value 1.  */
#define IDENTIFIER_KEYWORD_P(NODE)		\
  ((!IDENTIFIER_KIND_BIT_2 (NODE))		\
   & (!IDENTIFIER_KIND_BIT_1 (NODE))		\
   & IDENTIFIER_KIND_BIT_0 (NODE))

/* True if this identifier is the name of a constructor or
   destructor.  Value 2 or 3.  */
#define IDENTIFIER_CDTOR_P(NODE)		\
  ((!IDENTIFIER_KIND_BIT_2 (NODE))		\
   & IDENTIFIER_KIND_BIT_1 (NODE))

/* True if this identifier is the name of a constructor.  Value 2.  */
#define IDENTIFIER_CTOR_P(NODE)			\
  (IDENTIFIER_CDTOR_P(NODE)			\
    & (!IDENTIFIER_KIND_BIT_0 (NODE)))

/* True if this identifier is the name of a destructor.  Value 3.  */
#define IDENTIFIER_DTOR_P(NODE)			\
  (IDENTIFIER_CDTOR_P(NODE)			\
    & IDENTIFIER_KIND_BIT_0 (NODE))

/* True if this identifier is for any operator name (including
   conversions).  Value 4, 5, 6 or 7.  */
#define IDENTIFIER_ANY_OP_P(NODE)		\
  (IDENTIFIER_KIND_BIT_2 (NODE))

/* True if this identifier is for an overloaded operator. Values 4, 5.  */
#define IDENTIFIER_OVL_OP_P(NODE)		\
  (IDENTIFIER_ANY_OP_P (NODE)			\
   & (!IDENTIFIER_KIND_BIT_1 (NODE)))

/* True if this identifier is for any assignment.  Value 5.  */
#define IDENTIFIER_ASSIGN_OP_P(NODE)		\
  (IDENTIFIER_OVL_OP_P (NODE)			\
   & IDENTIFIER_KIND_BIT_0 (NODE))

/* True if this identifier is the name of a type-conversion
   operator.  Value 6 (cik_conv_op: bit2 & bit1 & !bit0) -- an earlier
   comment said "Value 7", which disagreed with the bit test below.  */
#define IDENTIFIER_CONV_OP_P(NODE)		\
  (IDENTIFIER_ANY_OP_P (NODE)			\
   & IDENTIFIER_KIND_BIT_1 (NODE)		\
   & (!IDENTIFIER_KIND_BIT_0 (NODE)))

/* True if this identifier is a new or delete operator.  */
#define IDENTIFIER_NEWDEL_OP_P(NODE)		\
  (IDENTIFIER_OVL_OP_P (NODE)			\
   && IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC)

/* True if this identifier is a new operator.  */
#define IDENTIFIER_NEW_OP_P(NODE)					\
  (IDENTIFIER_OVL_OP_P (NODE)						\
   && (IDENTIFIER_OVL_OP_FLAGS (NODE)					\
       & (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) == OVL_OP_FLAG_ALLOC)

/* Access a C++-specific index for identifier NODE.
   Used to optimize operator mappings etc.  */
#define IDENTIFIER_CP_INDEX(NODE)		\
  (IDENTIFIER_NODE_CHECK(NODE)->base.u.bits.address_space)

/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only.  */
#define C_TYPE_FIELDS_READONLY(TYPE) \
  (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)

/* The tokens stored in the unparsed operand.
 */
#define DEFPARSE_TOKENS(NODE) \
  (((struct tree_deferred_parse *)DEFERRED_PARSE_CHECK (NODE))->tokens)
#define DEFPARSE_INSTANTIATIONS(NODE) \
  (((struct tree_deferred_parse *)DEFERRED_PARSE_CHECK (NODE))->instantiations)

/* The payload of a DEFERRED_PARSE node: a cached token stream plus the
   instantiations waiting on it.  */
struct GTY (()) tree_deferred_parse {
  struct tree_base base;
  struct cp_token_cache *tokens;
  vec<tree, va_gc> *instantiations;
};

#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
  (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
  (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
/* NODE is an exception-specification TREE_LIST whose TREE_PURPOSE is a
   DEFERRED_NOEXCEPT.  */
#define DEFERRED_NOEXCEPT_SPEC_P(NODE)				\
  ((NODE) && (TREE_PURPOSE (NODE))				\
   && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT))
/* As above, and additionally the deferred noexcept has no pattern yet.  */
#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE)				\
  (DEFERRED_NOEXCEPT_SPEC_P (NODE)					\
   && DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE)
#define UNPARSED_NOEXCEPT_SPEC_P(NODE) \
  ((NODE) && (TREE_PURPOSE (NODE)) \
   && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_PARSE))

struct GTY (()) tree_deferred_noexcept {
  struct tree_base base;
  tree pattern;
  tree args;
};

/* The condition associated with the static assertion.  This must be
   an integral constant expression.  */
#define STATIC_ASSERT_CONDITION(NODE) \
  (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)

/* The message associated with the static assertion.  This must be a
   string constant, which will be emitted as an error message when the
   static assert condition is false.  */
#define STATIC_ASSERT_MESSAGE(NODE) \
  (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)

/* Source location information for a static assertion.  */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
  (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)

struct GTY (()) tree_static_assert {
  struct tree_common common;
  tree condition;
  tree message;
  location_t location;
};

/* Selects element INDEX from ARGUMENT_PACK.  */
struct GTY (()) tree_argument_pack_select {
  struct tree_common common;
  tree argument_pack;
  int index;
};

/* The different kinds of traits that we encounter.  */

enum cp_trait_kind
{
  CPTK_BASES,
  CPTK_DIRECT_BASES,
  CPTK_HAS_NOTHROW_ASSIGN,
  CPTK_HAS_NOTHROW_CONSTRUCTOR,
  CPTK_HAS_NOTHROW_COPY,
  CPTK_HAS_TRIVIAL_ASSIGN,
  CPTK_HAS_TRIVIAL_CONSTRUCTOR,
  CPTK_HAS_TRIVIAL_COPY,
  CPTK_HAS_TRIVIAL_DESTRUCTOR,
  CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS,
  CPTK_HAS_VIRTUAL_DESTRUCTOR,
  CPTK_IS_ABSTRACT,
  CPTK_IS_AGGREGATE,
  CPTK_IS_BASE_OF,
  CPTK_IS_CLASS,
  CPTK_IS_EMPTY,
  CPTK_IS_ENUM,
  CPTK_IS_FINAL,
  CPTK_IS_LITERAL_TYPE,
  CPTK_IS_POD,
  CPTK_IS_POLYMORPHIC,
  CPTK_IS_SAME_AS,
  CPTK_IS_STD_LAYOUT,
  CPTK_IS_TRIVIAL,
  CPTK_IS_TRIVIALLY_ASSIGNABLE,
  CPTK_IS_TRIVIALLY_CONSTRUCTIBLE,
  CPTK_IS_TRIVIALLY_COPYABLE,
  CPTK_IS_UNION,
  CPTK_UNDERLYING_TYPE,
  CPTK_IS_ASSIGNABLE,
  CPTK_IS_CONSTRUCTIBLE
};

/* The types that we are processing.  */
#define TRAIT_EXPR_TYPE1(NODE) \
  (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)

#define TRAIT_EXPR_TYPE2(NODE) \
  (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)

/* The specific trait that we are processing.  */
#define TRAIT_EXPR_KIND(NODE) \
  (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)

#define TRAIT_EXPR_LOCATION(NODE) \
  (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->locus)

struct GTY (()) tree_trait_expr {
  struct tree_common common;
  tree type1;
  tree type2;
  location_t locus;
  enum cp_trait_kind kind;
};

/* Identifiers used for lambda types are almost anonymous.  Use this
   spare flag to distinguish them (they also have the anonymous flag).  */
#define IDENTIFIER_LAMBDA_P(NODE) \
  (IDENTIFIER_NODE_CHECK(NODE)->base.protected_flag)

/* Based off of TYPE_UNNAMED_P.
 */
#define LAMBDA_TYPE_P(NODE)					\
  (TREE_CODE (NODE) == RECORD_TYPE				\
   && TYPE_LINKAGE_IDENTIFIER (NODE)				\
   && IDENTIFIER_LAMBDA_P (TYPE_LINKAGE_IDENTIFIER (NODE)))

/* Test if FUNCTION_DECL is a lambda function.  */
#define LAMBDA_FUNCTION_P(FNDECL)				\
  (DECL_DECLARES_FUNCTION_P (FNDECL)				\
   && DECL_OVERLOADED_OPERATOR_P (FNDECL)			\
   && DECL_OVERLOADED_OPERATOR_IS (FNDECL, CALL_EXPR)		\
   && LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))

enum cp_lambda_default_capture_mode_type {
  CPLD_NONE,		/* no default capture */
  CPLD_COPY,		/* default capture by copy */
  CPLD_REFERENCE	/* default capture by reference */
};

/* The method of default capture, if any.  */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
  (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)

/* The capture-list, including `this'.  Each capture is stored as a FIELD_DECL
 * so that the name, type, and field are all together, whether or not it has
 * been added to the lambda's class type.
   TREE_LIST:
     TREE_PURPOSE: The FIELD_DECL for this capture.
     TREE_VALUE: The initializer. This is part of a GNU extension.  */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
  (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)

/* During parsing of the lambda-introducer, the node in the capture-list
   that holds the 'this' capture.  During parsing of the body, the
   capture proxy for that node.  */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
  (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)

/* Predicate tracking whether `this' is in the effective capture set.  */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
  LAMBDA_EXPR_THIS_CAPTURE(NODE)

/* Predicate tracking whether the lambda was declared 'mutable'.  */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
  TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))

/* True iff uses of a const variable capture were optimized away.  */
#define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \
  TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE))

/* True iff this LAMBDA_EXPR was generated in tsubst_lambda_expr.
 */
#define LAMBDA_EXPR_INSTANTIATED(NODE) \
  TREE_LANG_FLAG_3 (LAMBDA_EXPR_CHECK (NODE))

/* True if this TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST is for an explicit
   capture.  */
#define LAMBDA_CAPTURE_EXPLICIT_P(NODE) \
  TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))

/* The source location of the lambda.  */
#define LAMBDA_EXPR_LOCATION(NODE) \
  (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)

/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
   FIELD_DECL or NULL_TREE.  If this is NULL_TREE, we have no linkage.  */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
  (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)

/* If EXTRA_SCOPE, this is the number of the lambda within that scope.  */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
  (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)

/* During parsing of the lambda, a vector of capture proxies which need
   to be pushed once we're done processing a nested lambda.  */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
  (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)

/* The closure type of the lambda, which is also the type of the
   LAMBDA_EXPR.  */
#define LAMBDA_EXPR_CLOSURE(NODE) \
  (TREE_TYPE (LAMBDA_EXPR_CHECK (NODE)))

/* The payload of a LAMBDA_EXPR node (see the accessors above).  */
struct GTY (()) tree_lambda_expr
{
  struct tree_typed typed;
  tree capture_list;
  tree this_capture;
  tree extra_scope;
  vec<tree, va_gc> *pending_proxies;
  location_t locus;
  enum cp_lambda_default_capture_mode_type default_capture_mode;
  int discriminator;
};

/* A (typedef,context,usage location) triplet.
   It represents a typedef used through a context at a given source location.
   e.g.
   struct foo {
     typedef int myint;
   };

   struct bar {
    foo::myint v; // #1<-- this location.
   };

   In bar, the triplet will be (myint, foo, #1).
*/
struct GTY(()) qualified_typedef_usage_s {
  tree typedef_decl;
  tree context;
  location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;

/* Non-zero if this template specialization has access violations that
   should be rechecked when the function is instantiated outside argument
   deduction.  */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
  (TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
  (TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))

/* Non-zero if this variable template specialization was specified using a
   template-id, so it's a partial or full specialization and not a definition
   of the member template of a particular class specialization.  */
#define TINFO_USED_TEMPLATE_ID(NODE) \
  (TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))

/* Non-zero if this variable template specialization was declared with
   the `constinit' specifier.  */
#define TINFO_VAR_DECLARED_CONSTINIT(NODE) \
  (TREE_LANG_FLAG_2 (TEMPLATE_INFO_CHECK (NODE)))

/* Template information for a declaration: the template (TMPL) and the
   argument list (ARGS) it was instantiated with, plus any typedefs whose
   access must still be checked.  */
struct GTY(()) tree_template_info {
  struct tree_base base;
  tree tmpl;
  tree args;
  vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};

// Constraint information for a C++ declaration.  Constraint information is
// comprised of:
//
// - a constraint expression introduced by the template header
// - a constraint expression introduced by a function declarator
// - the associated constraints, which are the conjunction of those,
//   and used for declaration matching
//
// The template and declarator requirements are kept to support pretty
// printing constrained declarations.
struct GTY(()) tree_constraint_info {
  struct tree_base base;
  tree template_reqs;
  tree declarator_reqs;
  tree associated_constr;
};

// Require that pointer P is non-null before returning.
template<typename T>
inline T*
check_nonnull (T* p)
{
  gcc_assert (p);
  return p;
}

/* Returns true iff T is non-null and represents constraint info.
*/
inline tree_constraint_info *
check_constraint_info (tree t)
{
  if (t && TREE_CODE (t) == CONSTRAINT_INFO)
    return (tree_constraint_info *)t;
  return NULL;
}

/* Access the expression describing the template constraints.  This may be
   null if no constraints were introduced in the template parameter list,
   a requirements clause after the template parameter list, or constraints
   through a constrained-type-specifier.  */
#define CI_TEMPLATE_REQS(NODE) \
  check_constraint_info (check_nonnull (NODE))->template_reqs

/* Access the expression describing the trailing constraints.  This is
   non-null for any implicit instantiation of a constrained declaration.
   For a templated declaration it is non-null only when a trailing
   requires-clause was specified.  */
#define CI_DECLARATOR_REQS(NODE) \
  check_constraint_info (check_nonnull (NODE))->declarator_reqs

/* The computed associated constraint expression for a declaration.  */
#define CI_ASSOCIATED_CONSTRAINTS(NODE) \
  check_constraint_info (check_nonnull (NODE))->associated_constr

/* Access the constraint-expression introduced by the requires-clause
   associated with the template parameter list NODE.  */
#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \
  TREE_TYPE (TREE_LIST_CHECK (NODE))

/* Access the logical constraints on the template parameter declaration
   indicated by NODE.  */
#define TEMPLATE_PARM_CONSTRAINTS(NODE) \
  TREE_TYPE (TREE_LIST_CHECK (NODE))

/* Non-zero if the noexcept is present in a compound requirement.  */
#define COMPOUND_REQ_NOEXCEPT_P(NODE) \
  TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ))

/* The constraints on an 'auto' placeholder type, used in an argument
   deduction constraint.  */
#define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \
  DECL_SIZE_UNIT (TYPE_NAME (NODE))

/* True if NODE is a constraint.  */
#define CONSTR_P(NODE)			\
  (TREE_CODE (NODE) == ATOMIC_CONSTR	\
   || TREE_CODE (NODE) == CONJ_CONSTR	\
   || TREE_CODE (NODE) == DISJ_CONSTR)

/* Valid for any normalized constraint.
*/
#define CONSTR_CHECK(NODE) \
  TREE_CHECK3 (NODE, ATOMIC_CONSTR, CONJ_CONSTR, DISJ_CONSTR)

/* The CONSTR_INFO stores normalization data for a constraint.  It refers
   to the original expression and the expression or declaration from which
   the constraint was normalized.

   This is TREE_LIST whose TREE_PURPOSE is the original expression and whose
   TREE_VALUE is a list of contexts.  */
#define CONSTR_INFO(NODE) \
  TREE_TYPE (CONSTR_CHECK (NODE))

/* The expression evaluated by the constraint.  */
#define CONSTR_EXPR(NODE) \
  TREE_PURPOSE (CONSTR_INFO (NODE))

/* The expression or declaration from which this constraint was normalized.
   This is a TREE_LIST whose TREE_VALUE is either a template-id expression
   denoting a concept check or the declaration introducing the constraint.
   These are chained to other context objects.  */
#define CONSTR_CONTEXT(NODE) \
  TREE_VALUE (CONSTR_INFO (NODE))

/* The parameter mapping for an atomic constraint.  */
#define ATOMIC_CONSTR_MAP(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, ATOMIC_CONSTR), 0)

/* The expression of an atomic constraint.  */
#define ATOMIC_CONSTR_EXPR(NODE) \
  CONSTR_EXPR (ATOMIC_CONSTR_CHECK (NODE))

/* The concept of a concept check.  */
#define CHECK_CONSTR_CONCEPT(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0)

/* The template arguments of a concept check.  */
#define CHECK_CONSTR_ARGS(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1)

/* Whether a PARM_DECL represents a local parameter in a
   requires-expression.  */
#define CONSTRAINT_VAR_P(NODE) \
  DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL))

/* The concept constraining this constrained template-parameter.  */
#define CONSTRAINED_PARM_CONCEPT(NODE) \
  DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE))
/* Any extra template arguments specified for a constrained
   template-parameter.
*/
#define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \
  DECL_SIZE (TYPE_DECL_CHECK (NODE))

/* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a
   prototype for the constrained parameter in finish_shorthand_constraint,
   attached for convenience.  */
#define CONSTRAINED_PARM_PROTOTYPE(NODE) \
  DECL_INITIAL (TYPE_DECL_CHECK (NODE))

/* Discriminators for the members of the lang_tree_node union below; used
   as GTY tags to select the active variant.  */
enum cp_tree_node_structure_enum {
  TS_CP_GENERIC,
  TS_CP_IDENTIFIER,
  TS_CP_TPI,
  TS_CP_PTRMEM,
  TS_CP_OVERLOAD,
  TS_CP_BASELINK,
  TS_CP_TEMPLATE_DECL,
  TS_CP_DEFERRED_PARSE,
  TS_CP_DEFERRED_NOEXCEPT,
  TS_CP_STATIC_ASSERT,
  TS_CP_ARGUMENT_PACK_SELECT,
  TS_CP_TRAIT_EXPR,
  TS_CP_LAMBDA_EXPR,
  TS_CP_TEMPLATE_INFO,
  TS_CP_CONSTRAINT_INFO,
  TS_CP_USERDEF_LITERAL
};

/* The resulting tree type.  */
union GTY((desc ("cp_tree_node_structure (&%h)"),
	   chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)")))
  lang_tree_node {
  union tree_node GTY ((tag ("TS_CP_GENERIC"),
			desc ("tree_node_structure (&%h)"))) generic;
  struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi;
  struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
  struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
  struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
  struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl;
  struct tree_deferred_parse GTY ((tag ("TS_CP_DEFERRED_PARSE"))) deferred_parse;
  struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
  struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
  struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT"))) static_assertion;
  struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT"))) argument_pack_select;
  struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR"))) trait_expression;
  struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR"))) lambda_expression;
  struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO"))) template_info;
  struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO"))) constraint_info;
  struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL"))) userdef_literal;
};

/* Global state.  */
struct GTY(()) saved_scope {
  vec<cxx_saved_binding, va_gc> *old_bindings;
  tree old_namespace;
  vec<tree, va_gc> *decl_ns_list;
  tree class_name;
  tree class_type;
  tree access_specifier;
  tree function_decl;
  vec<tree, va_gc> *lang_base;
  tree lang_name;
  tree template_parms;
  cp_binding_level *x_previous_class_level;
  tree x_saved_tree;

  /* Only used for uses of this in trailing return type.  */
  tree x_current_class_ptr;
  tree x_current_class_ref;

  int x_processing_template_decl;
  int x_processing_specialization;
  int x_processing_constraint;
  int suppress_location_wrappers;
  BOOL_BITFIELD x_processing_explicit_instantiation : 1;
  BOOL_BITFIELD need_pop_function_context : 1;

  /* Nonzero if we are parsing the discarded statement of a constexpr
     if-statement.  */
  BOOL_BITFIELD discarded_stmt : 1;

  int unevaluated_operand;
  int inhibit_evaluation_warnings;
  int noexcept_operand;
  /* If non-zero, implicit "omp declare target" attribute is added into the
     attribute lists.  */
  int omp_declare_target_attribute;
  int ref_temp_count;

  struct stmt_tree_s x_stmt_tree;

  cp_binding_level *class_bindings;
  cp_binding_level *bindings;

  hash_map<tree, tree> *GTY((skip)) x_local_specializations;

  struct saved_scope *prev;
};

extern GTY(()) struct saved_scope *scope_chain;

/* The current open namespace.  */
#define current_namespace scope_chain->old_namespace

/* The stack for namespaces of current declarations.  */
#define decl_namespace_list scope_chain->decl_ns_list

/* IDENTIFIER_NODE: name of current class.  */
#define current_class_name scope_chain->class_name

/* _TYPE: the type of the current class.  */
#define current_class_type scope_chain->class_type

/* When parsing a class definition, the access specifier most recently given
   by the user, or, if no access specifier was given, the default value
   appropriate for the kind of class (i.e., struct, class, or union).
*/
#define current_access_specifier scope_chain->access_specifier

/* Pointer to the top of the language name stack.  */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name

/* When parsing a template declaration, a TREE_LIST represents the
   active template parameters.  Each node in the list represents one
   level of template parameters.  The innermost level is first in the
   list.  The depth of each level is stored as an INTEGER_CST in the
   TREE_PURPOSE of each node.  The parameters for that level are
   stored in the TREE_VALUE.  */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
#define in_discarded_stmt scope_chain->discarded_stmt
#define current_ref_temp_count scope_chain->ref_temp_count

/* RAII sentinel to handle clearing processing_template_decl and restoring
   it when done.  */

class processing_template_decl_sentinel
{
public:
  int saved;
  processing_template_decl_sentinel (bool reset = true)
    : saved (processing_template_decl)
  {
    if (reset)
      processing_template_decl = 0;
  }
  ~processing_template_decl_sentinel()
  {
    processing_template_decl = saved;
  }
};

/* RAII sentinel to disable certain warnings during template substitution
   and elsewhere.  */

class warning_sentinel
{
public:
  int &flag;
  int val;
  warning_sentinel(int& flag, bool suppress=true)
    : flag(flag), val(flag)
  {
    if (suppress)
      flag = 0;
  }
  ~warning_sentinel() { flag = val; }
};

/* RAII sentinel to temporarily override input_location.  This will not set
   input_location to UNKNOWN_LOCATION or BUILTINS_LOCATION.
*/
class iloc_sentinel
{
  location_t saved_loc;
public:
  iloc_sentinel (location_t loc): saved_loc (input_location)
  {
    if (loc >= RESERVED_LOCATION_COUNT)
      input_location = loc;
  }
  ~iloc_sentinel ()
  {
    input_location = saved_loc;
  }
};

/* RAII sentinel that saves the value of a variable, optionally overrides
   it right away, and restores its value when the sentinel is destructed.  */

template <typename T>
class temp_override
{
  T& overridden_variable;
  T saved_value;
public:
  temp_override(T& var) : overridden_variable (var), saved_value (var) {}
  temp_override(T& var, T overrider)
    : overridden_variable (var), saved_value (var)
  {
    overridden_variable = overrider;
  }
  ~temp_override() { overridden_variable = saved_value; }
};

/* The cached class binding level, from the most recently exited
   class, or NULL if none.  */
#define previous_class_level scope_chain->x_previous_class_level

/* A map from local variable declarations in the body of the template
   presently being instantiated to the corresponding instantiated
   local variables.  */
#define local_specializations scope_chain->x_local_specializations

/* Nonzero if we are parsing the operand of a noexcept operator.  */
#define cp_noexcept_operand scope_chain->noexcept_operand

/* A list of private types mentioned, for deferred access checking.  */

/* A (DECL_UID, tree) pair, hashed by uid via cxx_int_tree_map_hasher.  */
struct GTY((for_user)) cxx_int_tree_map {
  unsigned int uid;
  tree to;
};

struct cxx_int_tree_map_hasher : ggc_ptr_hash<cxx_int_tree_map>
{
  static hashval_t hash (cxx_int_tree_map *);
  static bool equal (cxx_int_tree_map *, cxx_int_tree_map *);
};

struct named_label_entry; /* Defined in decl.c.  */

/* Hash traits for the named-label table in language_function below.  */
struct named_label_hash : ggc_remove <named_label_entry *>
{
  typedef named_label_entry *value_type;
  typedef tree compare_type; /* An identifier.  */

  inline static hashval_t hash (value_type);
  inline static bool equal (const value_type, compare_type);

  static const bool empty_zero_p = true;
  inline static void mark_empty (value_type &p) {p = NULL;}
  inline static bool is_empty (value_type p) {return !p;}

  /* Nothing is deletable.  Everything is insertable.  */
  inline static bool is_deleted (value_type) { return false; }
  inline static void mark_deleted (value_type) { gcc_unreachable (); }
};

/* Global state pertinent to the current function.  */

struct GTY(()) language_function {
  struct c_language_function base;

  tree x_cdtor_label;
  tree x_current_class_ptr;
  tree x_current_class_ref;
  tree x_eh_spec_block;
  tree x_in_charge_parm;
  tree x_vtt_parm;
  tree x_return_value;

  BOOL_BITFIELD returns_value : 1;
  BOOL_BITFIELD returns_null : 1;
  BOOL_BITFIELD returns_abnormally : 1;
  BOOL_BITFIELD infinite_loop: 1;
  BOOL_BITFIELD x_in_function_try_handler : 1;
  BOOL_BITFIELD x_in_base_initializer : 1;

  /* True if this function can throw an exception.  */
  BOOL_BITFIELD can_throw : 1;

  BOOL_BITFIELD invalid_constexpr : 1;
  BOOL_BITFIELD throwing_cleanup : 1;

  hash_table<named_label_hash> *x_named_labels;

  cp_binding_level *bindings;

  /* Tracking possibly infinite loops.  This is a vec<tree> only because
     vec<bool> doesn't work with gtype.  */
  vec<tree, va_gc> *infinite_loops;
  hash_table<cxx_int_tree_map_hasher> *extern_decl_map;
};

/* The current C++-specific per-function global variables.  */

#define cp_function_chain (cfun->language)

/* In a constructor destructor, the point at which all derived class
   destroying/construction has been done.  I.e., just before a
   constructor returns, or before any base class destroying will be done
   in a destructor.  */

#define cdtor_label cp_function_chain->x_cdtor_label

/* When we're processing a member function, current_class_ptr is the
   PARM_DECL for the `this' pointer.  The current_class_ref is an
   expression for `*this'.  */

#define current_class_ptr			\
  (*(cfun && cp_function_chain			\
     ? &cp_function_chain->x_current_class_ptr	\
     : &scope_chain->x_current_class_ptr))
#define current_class_ref			\
  (*(cfun && cp_function_chain			\
     ? &cp_function_chain->x_current_class_ref	\
     : &scope_chain->x_current_class_ref))

/* The EH_SPEC_BLOCK for the exception-specifiers for the current
   function, if any.  */

#define current_eh_spec_block cp_function_chain->x_eh_spec_block

/* The `__in_chrg' parameter for the current function.  Only used for
   constructors and destructors.  */

#define current_in_charge_parm cp_function_chain->x_in_charge_parm

/* The `__vtt_parm' parameter for the current function.  Only used for
   constructors and destructors.  */

#define current_vtt_parm cp_function_chain->x_vtt_parm

/* A boolean flag to control whether we need to clean up the return value if a
   local destructor throws.  Only used in functions that return by value a
   class with a destructor.  Which 'tors don't, so we can use the same
   field as current_vtt_parm.  */

#define current_retval_sentinel current_vtt_parm

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */

#define current_function_returns_value cp_function_chain->returns_value

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */

#define current_function_returns_null cp_function_chain->returns_null

/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */

#define current_function_returns_abnormally \
  cp_function_chain->returns_abnormally

/* Set to 0 at beginning of a function definition, set to 1 if we see an
   obvious infinite loop.  This can have false positives and false
   negatives, so it should only be used as a heuristic.  */

#define current_function_infinite_loop cp_function_chain->infinite_loop

/* Nonzero if we are processing a base initializer.  Zero elsewhere.
*/
#define in_base_initializer cp_function_chain->x_in_base_initializer

#define in_function_try_handler cp_function_chain->x_in_function_try_handler

/* Expression always returned from function, or error_mark_node
   otherwise, for use by the automatic named return value optimization.  */

#define current_function_return_value \
  (cp_function_chain->x_return_value)

/* In parser.c.  */

extern tree cp_literal_operator_id (const char *);

/* Map error_mark_node to NULL_TREE, leave any other node unchanged.  */
#define NON_ERROR(NODE) ((NODE) == error_mark_node ? NULL_TREE : (NODE))

/* TRUE if a tree code represents a statement.  */
extern bool statement_code_p[MAX_TREE_CODES];

#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]

enum languages { lang_c, lang_cplusplus };

/* Macros to make error reporting functions' lives easier.  */
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
  (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))

/* Any kind of anonymous type.  */
#define TYPE_ANON_P(NODE)				\
  (TYPE_LINKAGE_IDENTIFIER (NODE)			\
   && IDENTIFIER_ANON_P (TYPE_LINKAGE_IDENTIFIER (NODE)))

/* Nonzero if NODE, a TYPE, has no name for linkage purposes.  */
#define TYPE_UNNAMED_P(NODE)				\
  (TYPE_ANON_P (NODE)					\
   && !IDENTIFIER_LAMBDA_P (TYPE_LINKAGE_IDENTIFIER (NODE)))

/* The _DECL for this _TYPE.  */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))

/* Nonzero if T is a type that could resolve to any kind of concrete type
   at instantiation time.  */
#define WILDCARD_TYPE_P(T)				\
  (TREE_CODE (T) == TEMPLATE_TYPE_PARM			\
   || TREE_CODE (T) == TYPENAME_TYPE			\
   || TREE_CODE (T) == TYPEOF_TYPE			\
   || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM	\
   || TREE_CODE (T) == DECLTYPE_TYPE)

/* Nonzero if T is a class (or struct or union) type.  Also nonzero
   for template type parameters, typename types, and instantiated
   template template parameters.  Keep these checks in ascending code
   order.
*/
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))

/* Set CLASS_TYPE_P for T to VAL.  T must be a class, struct, or
   union type.  */
#define SET_CLASS_TYPE_P(T, VAL) \
  (TYPE_LANG_FLAG_5 (RECORD_OR_UNION_CHECK (T)) = (VAL))

/* Nonzero if T is a class type.  Zero for template type parameters,
   typename types, and so forth.  */
#define CLASS_TYPE_P(T) \
  (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))

/* Nonzero if T is a class type but not a union.  */
#define NON_UNION_CLASS_TYPE_P(T) \
  (TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T))

/* Keep these checks in ascending code order.  */
#define RECORD_OR_UNION_CODE_P(T)	\
  ((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define OVERLOAD_TYPE_P(T) \
  (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)

/* True if this type is dependent.  This predicate is only valid if
   TYPE_DEPENDENT_P_VALID is true.  */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)

/* True if dependent_type_p has been called for this type, with the
   result that TYPE_DEPENDENT_P is valid.  */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)

/* Nonzero if this type is const-qualified.  */
#define CP_TYPE_CONST_P(NODE)				\
  ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)

/* Nonzero if this type is volatile-qualified.  */
#define CP_TYPE_VOLATILE_P(NODE)			\
  ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)

/* Nonzero if this type is restrict-qualified.  */
#define CP_TYPE_RESTRICT_P(NODE)			\
  ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)

/* Nonzero if this type is const-qualified, but not
   volatile-qualified.  Other qualifiers are ignored.  This macro is
   used to test whether or not it is OK to bind an rvalue to a
   reference.  */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE)				\
  ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE))	\
   == TYPE_QUAL_CONST)

/* The parameter-type list of a FUNCTION_DECL, past the implicit first
   entry.  */
#define FUNCTION_ARG_CHAIN(NODE) \
  TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))

/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
   which refers to a user-written parameter.  */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
  skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))

/* Similarly, but for DECL_ARGUMENTS.  */
#define FUNCTION_FIRST_USER_PARM(NODE) \
  skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))

/* Nonzero iff TYPE is derived from PARENT.  Ignores accessibility and
   ambiguity issues.  */
#define DERIVED_FROM_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)

/* Gives the visibility specification for a class type.  */
#define CLASSTYPE_VISIBILITY(TYPE)		\
  DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE)	\
  DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))

/* A simple (purpose, value) pair of trees.  */
struct GTY (()) tree_pair_s {
  tree purpose;
  tree value;
};
typedef tree_pair_s *tree_pair_p;

/* This structure provides additional information above and beyond
   what is provided in the ordinary tree_type.  In the past, we used it
   for the types of class types, template parameters types, typename
   types, and so forth.  However, there can be many (tens to hundreds
   of thousands) of template parameter types in a compilation, and
   there's no need for this additional information in that case.
   Therefore, we now use this data structure only for class types.

   In the past, it was thought that there would be relatively few
   class types.  However, in the presence of heavy use of templates,
   many (i.e., thousands) of classes can easily be generated.
   Therefore, we should endeavor to keep the size of this structure to
   a minimum.
*/
struct GTY(()) lang_type
{
  unsigned char align;

  unsigned has_type_conversion : 1;
  unsigned has_copy_ctor : 1;
  unsigned has_default_ctor : 1;
  unsigned const_needs_init : 1;
  unsigned ref_needs_init : 1;
  unsigned has_const_copy_assign : 1;
  unsigned use_template : 2;

  unsigned has_mutable : 1;
  unsigned com_interface : 1;
  unsigned non_pod_class : 1;
  unsigned nearly_empty_p : 1;
  unsigned user_align : 1;
  unsigned has_copy_assign : 1;
  unsigned has_new : 1;
  unsigned has_array_new : 1;

  unsigned gets_delete : 2;
  unsigned interface_only : 1;
  unsigned interface_unknown : 1;
  unsigned contains_empty_class_p : 1;
  unsigned anon_aggr : 1;
  unsigned non_zero_init : 1;
  unsigned empty_p : 1;
  /* 32 bits allocated.  */

  unsigned vec_new_uses_cookie : 1;
  unsigned declared_class : 1;
  unsigned diamond_shaped : 1;
  unsigned repeated_base : 1;
  unsigned being_defined : 1;
  unsigned debug_requested : 1;
  unsigned fields_readonly : 1;
  unsigned ptrmemfunc_flag : 1;

  unsigned lazy_default_ctor : 1;
  unsigned lazy_copy_ctor : 1;
  unsigned lazy_copy_assign : 1;
  unsigned lazy_destructor : 1;
  unsigned has_const_copy_ctor : 1;
  unsigned has_complex_copy_ctor : 1;
  unsigned has_complex_copy_assign : 1;
  unsigned non_aggregate : 1;

  unsigned has_complex_dflt : 1;
  unsigned has_list_ctor : 1;
  unsigned non_std_layout : 1;
  unsigned is_literal : 1;
  unsigned lazy_move_ctor : 1;
  unsigned lazy_move_assign : 1;
  unsigned has_complex_move_ctor : 1;
  unsigned has_complex_move_assign : 1;

  unsigned has_constexpr_ctor : 1;
  unsigned unique_obj_representations : 1;
  unsigned unique_obj_representations_set : 1;

  /* When adding a flag here, consider whether or not it ought to
     apply to a template instance if it applies to the template.  If
     so, make sure to copy it in instantiate_class_template!  */

  /* There are some bits left to fill out a 32-bit word.  Keep track
     of this by updating the size of this bitfield whenever you add or
     remove a flag.  */
  unsigned dummy : 5;

  tree primary_base;
  vec<tree_pair_s, va_gc> *vcall_indices;
  tree vtables;
  tree typeinfo_var;
  vec<tree, va_gc> *vbases;
  binding_table nested_udts;
  tree as_base;
  vec<tree, va_gc> *pure_virtuals;
  tree friend_classes;
  vec<tree, va_gc> * GTY((reorder ("resort_type_member_vec"))) members;
  tree key_method;
  tree decl_list;
  tree befriending_classes;

  /* In a RECORD_TYPE, information specific to Objective-C++, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;

  /* FIXME reuse another field?  */
  tree lambda_expr;
};

/* We used to have a variant type for lang_type.  Keep the name of the
   checking accessor for the sole survivor.  */
#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE))

/* Nonzero for _CLASSTYPE means that operator delete is defined.  */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)

/* Nonzero if `new NODE[x]' should cause the allocation of extra
   storage to indicate how many array elements are in use.  */
#define TYPE_VEC_NEW_USES_COOKIE(NODE)			\
  (CLASS_TYPE_P (NODE)					\
   && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)

/* Nonzero means that this _CLASSTYPE node defines ways of converting
   itself to other types.  */
#define TYPE_HAS_CONVERSION(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_type_conversion)

/* Nonzero means that NODE (a class type) has a default constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)

/* Nonzero means that NODE (a class type) has a copy constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)

/* Nonzero means that NODE (a class type) has a move constructor --
   but that it has not yet been declared.
*/
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)

/* Nonzero means that NODE (a class type) has a copy assignment operator
   -- but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)

/* Nonzero means that NODE (a class type) has a move assignment operator
   -- but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)

/* Nonzero means that NODE (a class type) has a destructor -- but that
   it has not yet been declared.  */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)

/* Nonzero means that NODE (a class type) is final.  */
#define CLASSTYPE_FINAL(NODE) \
  TYPE_FINAL_P (NODE)

/* Nonzero means that this _CLASSTYPE node overloads operator=(X&).  */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)

/* True iff the class type NODE has an "operator =" whose parameter
   has a parameter of type "const X&".  */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_assign)

/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor.  */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)

/* Nonzero if this class has an X(initializer_list<T>) constructor.  */
#define TYPE_HAS_LIST_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)

/* Nonzero if this class has a constexpr constructor other than a copy/move
   constructor.  Note that a class can have constexpr constructors for
   static initialization even if it isn't a literal class.  */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)

/* Nonzero if this class defines an overloaded operator new.  (An
   operator new [] doesn't count.)  */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_new)

/* Nonzero if this class defines an overloaded operator new[].  */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)

/* Nonzero means that this type is being defined.  I.e., the left brace
   starting the definition of this type has been seen.  */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)

/* Nonzero means that this type is either complete or being defined, so we
   can do lookup in it.  */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
  (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))

/* Mark bits for repeated base checks.  */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))

/* Nonzero if the class NODE has multiple paths to the same (virtual)
   base object.  */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)

/* Nonzero if the class NODE has multiple instances of the same base
   type.  */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)

/* The member function with which the vtable will be emitted:
   the first noninline non-pure-virtual member function.  NULL_TREE
   if there is no key function or if this is a class template.  */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)

/* Vector of members.  During definition, it is unordered and only
   member functions are present.  After completion it is sorted and
   contains both member functions and non-functions.  STAT_HACK is
   involved to preserve the one-slot-per-name invariant.  */
#define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members)

/* For class templates, this is a TREE_LIST of all member data,
   functions, types, and friends in the order of declaration.
   The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
   and the RECORD_TYPE for the class template otherwise.
*/
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)

/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE.  These
   are the constructors that take an in-charge parameter.  */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
  (get_class_binding_direct (NODE, ctor_identifier))

/* A FUNCTION_DECL for the destructor for NODE.  This is the
   destructors that take an in-charge parameter.  If
   CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
   until the destructor is created with lazily_declare_fn.  */
#define CLASSTYPE_DESTRUCTOR(NODE) \
  (get_class_binding_direct (NODE, dtor_identifier))

/* A dictionary of the nested user-defined-types (class-types, or enums)
   found within this class.  This table includes nested member class
   templates.  */
#define CLASSTYPE_NESTED_UTDS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)

/* Nonzero if NODE has a primary base class, i.e., a base class with
   which it shares the virtual function table pointer.  */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
  (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)

/* If non-NULL, this is the binfo for the primary base class, i.e.,
   the base class which contains the virtual function table pointer
   for this class.  */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->primary_base)

/* A vector of BINFOs for the direct and indirect virtual base classes
   that this type uses in a post-order depth-first left-to-right
   order.  (In other words, these bases appear in the order that they
   should be initialized.)  */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)

/* The type corresponding to NODE when NODE is used as a base class,
   i.e., NODE without virtual base classes or tail padding.  */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)

/* True iff NODE is the CLASSTYPE_AS_BASE version of some type.  */
#define IS_FAKE_BASE_TYPE(NODE)					\
  (TREE_CODE (NODE) == RECORD_TYPE				\
   && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE))	\
   && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))

/* These are the size and alignment of the type without its virtual
   base classes, for when we use this type as a base itself.  */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))

/* The alignment of NODE, without its virtual bases, in bytes.  */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
  (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)

/* A vec<tree> of virtual functions which cannot be inherited by
   derived classes.  When deriving from this type, the derived
   class must provide its own definition for each of these functions.  */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)

/* Nonzero means that this type is an abstract class type.  */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
  (CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))

/* Nonzero means that this type has an X() constructor.  */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor)

/* Nonzero means that this type contains a mutable member.  */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))

/* Nonzero means that this class type is not POD for the purpose of layout
   (as defined in the ABI).  This is different from the language's POD.  */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)

/* Nonzero means that this class type is a non-standard-layout class.
*/
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)

/* Nonzero means that this class type does have unique object
   representations.  */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations)

/* Nonzero means that this class type has
   CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed.  */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set)

/* Nonzero means that this class contains pod types whose default
   initialization is not a zero initialization (namely, pointers to
   data members).  */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)

/* Nonzero if this class is "empty" in the sense of the C++ ABI.  */
#define CLASSTYPE_EMPTY_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->empty_p)

/* Nonzero if this class is "nearly empty", i.e., contains only a
   virtual function table pointer.  */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)

/* Nonzero if this class contains an empty subobject.  */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)

/* A list of class types of which this type is a friend.  The
   TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
   case of a template friend.  */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)

/* A list of the classes which grant friendship to this class.  */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)

/* The associated LAMBDA_EXPR that made this class.  */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type.  */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
  (LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))

/* Say whether this node was declared as a "class" or a "struct".  */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->declared_class)

/* Nonzero if this class has const members
   which have no specified initialization.  */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE)	\
  (TYPE_LANG_SPECIFIC (NODE)				\
   ? LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init = (VALUE))

/* Nonzero if this class has ref members
   which have no specified initialization.  */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE)		\
  (TYPE_LANG_SPECIFIC (NODE)				\
   ? LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init = (VALUE))

/* Nonzero if this class is included from a header file which employs
   `#pragma interface', and it is not included in its implementation file.  */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_only)

/* True if we have already determined whether or not vtables, VTTs,
   typeinfo, and other similar per-class data should be emitted in
   this translation unit.  This flag does not indicate whether or not
   these items should be emitted; it only indicates that we know one
   way or the other.  */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN.  */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)

#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)

/* Nonzero if a _DECL node requires us to output debug info for this class.  */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)

/* Additional macros for inheritance information.  */

/* Nonzero means that this class is on a path leading to a new vtable.  */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)

/* Nonzero means B (a BINFO) has its own vtable.  Any copies will not
   have this flag set.  */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))

/* Compare a BINFO_TYPE with another type for equality.  For a binfo,
   this is functionally equivalent to using same_type_p, but
   measurably faster.  At least one of the arguments must be a
   BINFO_TYPE.  The other can be a BINFO_TYPE or a regular type.  If
   BINFO_TYPE(T) ever stops being the main variant of the class the
   binfo is for, this macro must change.  */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))

/* Any subobject that needs a new vtable must have a vptr and must not
   be a non-virtual primary base (since it would then use the vtable from
   a derived class and never become non-primary.)  */
#define SET_BINFO_NEW_VTABLE_MARKED(B)					 \
  (BINFO_NEW_VTABLE_MARKED (B) = 1,					 \
   gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)),		 \
   gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))

/* Nonzero if this binfo is for a dependent base - one that should not
   be searched.  */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)

/* Nonzero if this binfo has lost its primary base binfo (because that
   is a nearly-empty virtual base that has been taken by some other
   base in the complete hierarchy).  */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)

/* Nonzero if this BINFO is a primary base class.  */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)

/* A vec<tree_pair_s> of the vcall indices associated with the class
   NODE.  The PURPOSE of each element is a FUNCTION_DECL for a virtual
   function.  The VALUE is the index into the virtual table where the
   vcall offset for that function is stored, when NODE is a virtual
   base.  */
#define CLASSTYPE_VCALL_INDICES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)

/* The various vtables for the class NODE.  The primary vtable will be
   first, followed by the construction vtables and VTT, if any.  */
#define CLASSTYPE_VTABLES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->vtables)

/* The std::type_info variable representing this class, or NULL if no
   such variable has been created.  This field is only set for the
   TYPE_MAIN_VARIANT of the class.  */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)

/* Accessor macros for the BINFO_VIRTUALS list.  */

/* The number of bytes by which to adjust the `this' pointer when
   calling this virtual function.  Subtract this value from the this
   pointer.  Always non-NULL, might be constant zero though.  */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))

/* If non-NULL, the vtable index at which to find the vcall offset
   when calling this virtual function.  Add the value at that vtable
   index to the this pointer.  */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))

/* The function to call.  */
#define BV_FN(NODE) (TREE_VALUE (NODE))

/* Whether or not this entry is for a lost primary virtual base.  */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))

/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
   this type can raise.  Each TREE_VALUE is a _TYPE.  The TREE_VALUE
   will be NULL_TREE to indicate a throw specification of `()', or
   no exceptions allowed.  For a noexcept specification, TREE_VALUE
   is NULL_TREE and TREE_PURPOSE is the constant-expression.  For
   a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
   (for templates) or an OVERLOAD list of functions (for implicitly
   declared functions).  */
#define TYPE_RAISES_EXCEPTIONS(NODE) \
  TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))

/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
   or noexcept(true).
*/
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))

/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept.  This is the
   case for things declared noexcept(true) and, with -fnothrow-opt, for
   throw() functions.  */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)

/* The binding level associated with the namespace.  */
#define NAMESPACE_LEVEL(NODE) \
  (LANG_DECL_NS_CHECK (NODE)->level)

/* Discriminator values for lang_decl.  These select which member of
   the union in struct lang_decl is active (see the GTY desc on
   lang_decl_u below).  */
enum lang_decl_selector
{
  lds_min,
  lds_fn,
  lds_ns,
  lds_parm,
  lds_decomp
};

/* Flags shared by all forms of DECL_LANG_SPECIFIC.

   Some of the flags live here only to make lang_decl_min/fn smaller.  Do
   not make this struct larger than 32 bits; instead, make sel smaller.  */
struct GTY(()) lang_decl_base
{
  /* Larger than necessary for faster access.  */
  ENUM_BITFIELD(lang_decl_selector) selector : 16;
  ENUM_BITFIELD(languages) language : 1;
  unsigned use_template : 2;
  unsigned not_really_extern : 1;	   /* var or fn */
  unsigned initialized_in_class : 1;	   /* var or fn */
  unsigned threadprivate_or_deleted_p : 1; /* var or fn */
  unsigned anticipated_p : 1;		   /* fn, type or template */
  /* anticipated_p reused as DECL_OMP_PRIVATIZED_MEMBER in var */
  unsigned friend_or_tls : 1;		   /* var, fn, type or template */
  unsigned unknown_bound_p : 1;		   /* var */
  unsigned odr_used : 1;		   /* var or fn */
  unsigned spare : 1;
  unsigned concept_p : 1;		   /* applies to vars and functions */
  unsigned var_declared_inline_p : 1;	   /* var */
  unsigned dependent_init_p : 1;	   /* var */
  /* 2 spare bits */
};

/* True for DECL codes which have template info and access.  */
#define LANG_DECL_HAS_MIN(NODE)			\
  (VAR_OR_FUNCTION_DECL_P (NODE)		\
   || TREE_CODE (NODE) == FIELD_DECL		\
   || TREE_CODE (NODE) == CONST_DECL		\
   || TREE_CODE (NODE) == TYPE_DECL		\
   || TREE_CODE (NODE) == TEMPLATE_DECL		\
   || TREE_CODE (NODE) == USING_DECL		\
   || TREE_CODE (NODE) == CONCEPT_DECL)

/* DECL_LANG_SPECIFIC for the above codes.  */

struct GTY(()) lang_decl_min
{
  struct lang_decl_base base;

  /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
     THUNK_ALIAS.
     In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
     VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
     DECL_TEMPLATE_INFO.  */
  tree template_info;

  /* In a DECL_THUNK_P FUNCTION_DECL, this is THUNK_VIRTUAL_OFFSET.
     In a lambda-capture proxy VAR_DECL, this is DECL_CAPTURED_VARIABLE.
     In a function-scope TREE_STATIC VAR_DECL or IMPLICIT_TYPEDEF_P
     TYPE_DECL, this is DECL_DISCRIMINATOR.
     Otherwise, in a class-scope DECL, this is DECL_ACCESS.   */
  tree access;
};

/* Additional DECL_LANG_SPECIFIC information for functions.  */

struct GTY(()) lang_decl_fn
{
  struct lang_decl_min min;

  /* In a overloaded operator, this is the compressed operator code.  */
  unsigned ovl_op_code : 6;
  unsigned global_ctor_p : 1;
  unsigned global_dtor_p : 1;

  unsigned static_function : 1;
  unsigned pure_virtual : 1;
  unsigned defaulted_p : 1;
  unsigned has_in_charge_parm_p : 1;
  unsigned has_vtt_parm_p : 1;
  unsigned pending_inline_p : 1;
  unsigned nonconverting : 1;
  unsigned thunk_p : 1;

  unsigned this_thunk_p : 1;
  unsigned hidden_friend_p : 1;
  unsigned omp_declare_reduction_p : 1;
  unsigned has_dependent_explicit_spec_p : 1;
  unsigned immediate_fn_p : 1;
  unsigned maybe_deleted : 1;
  unsigned coroutine_p : 1;
  unsigned spare : 9;

  /* 32-bits padding on 64-bit host.  */

  /* For a non-thunk function decl, this is a tree list of
     friendly classes. For a thunk function decl, it is the
     thunked to function decl.  */
  tree befriending_classes;

  /* For a non-virtual FUNCTION_DECL, this is
     DECL_FRIEND_CONTEXT.  For a virtual FUNCTION_DECL for which
     DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
     this pointer and result pointer adjusting thunks are
     chained here.  This pointer thunks to return pointer thunks
     will be chained on the return pointer thunk.  */
  tree context;

  union lang_decl_u5
  {
    /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
       DECL_CLONED_FUNCTION.  */
    tree GTY ((tag ("0"))) cloned_function;

    /* In a FUNCTION_DECL for which THUNK_P holds this is the
       THUNK_FIXED_OFFSET.  */
    HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
  } GTY ((desc ("%1.thunk_p"))) u5;

  union lang_decl_u3
  {
    struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
    tree GTY ((tag ("0"))) saved_auto_return_type;
  } GTY ((desc ("%1.pending_inline_p"))) u;

};

/* DECL_LANG_SPECIFIC for namespaces.  */

struct GTY(()) lang_decl_ns
{
  struct lang_decl_base base;
  cp_binding_level *level;

  /* Inline children.  These need to be va_gc, because of PCH.  */
  vec<tree, va_gc> *inlinees;

  /* Hash table of bound decls. It'd be nice to have this inline, but
     as the hash_map has a dtor, we can't then put this struct into a
     union (until moving to c++11).  */
  hash_table<named_decl_hash> *bindings;
};

/* DECL_LANG_SPECIFIC for parameters.  */

struct GTY(()) lang_decl_parm
{
  struct lang_decl_base base;
  int level;
  int index;
};

/* Additional DECL_LANG_SPECIFIC information for structured bindings.  */

struct GTY(()) lang_decl_decomp
{
  struct lang_decl_min min;
  /* The artificial underlying "e" variable of the structured binding
     variable.  */
  tree base;
};

/* DECL_LANG_SPECIFIC for all types.  It would be nice to just make this a
   union rather than a struct containing a union as its only field, but
   tree.h declares it as a struct.  */

struct GTY(()) lang_decl
{
  union GTY((desc ("%h.base.selector"))) lang_decl_u
  {
    /* Nothing of only the base type exists.  */
    struct lang_decl_base GTY ((default)) base;
    struct lang_decl_min GTY((tag ("lds_min"))) min;
    struct lang_decl_fn GTY ((tag ("lds_fn"))) fn;
    struct lang_decl_ns GTY((tag ("lds_ns"))) ns;
    struct lang_decl_parm GTY((tag ("lds_parm"))) parm;
    struct lang_decl_decomp GTY((tag ("lds_decomp"))) decomp;
  } u;
};

/* Looks through a template (if present) to find what it declares.  */
#define STRIP_TEMPLATE(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)

#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)

#define LANG_DECL_MIN_CHECK(NODE) __extension__			\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (!LANG_DECL_HAS_MIN (NODE))				\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.min; })

/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
   template, not just on a FUNCTION_DECL.  So when looking for things in
   lang_decl_fn, look down through a TEMPLATE_DECL into its result.  */
#define LANG_DECL_FN_CHECK(NODE) __extension__				\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE));	\
   if (!DECL_DECLARES_FUNCTION_P (NODE)					\
       || lt->u.base.selector != lds_fn)				\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);		\
   &lt->u.fn; })

#define LANG_DECL_NS_CHECK(NODE) __extension__			\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (TREE_CODE (NODE) != NAMESPACE_DECL			\
       || lt->u.base.selector != lds_ns)			\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.ns; })

#define LANG_DECL_PARM_CHECK(NODE) __extension__		\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
  if (TREE_CODE (NODE) != PARM_DECL				\
      || lt->u.base.selector != lds_parm)			\
    lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
  &lt->u.parm; })

#define LANG_DECL_DECOMP_CHECK(NODE) __extension__		\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
  if (!VAR_P (NODE)						\
      || lt->u.base.selector != lds_decomp)			\
    lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
  &lt->u.decomp; })

#else

#define LANG_DECL_MIN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min)

#define LANG_DECL_FN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)

#define LANG_DECL_NS_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.ns)

#define LANG_DECL_PARM_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.parm)

#define LANG_DECL_DECOMP_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.decomp)

#endif /* ENABLE_TREE_CHECKING */

/* For a FUNCTION_DECL or a VAR_DECL, the language
linkage for the declaration.  Some entities (like a member function in a
   local class, or a local variable) do not have linkage at all, and
   this macro should not be used in those cases.

   Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
   created by language-independent code, and has C linkage.  Most
   VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
   we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage.  */
#define DECL_LANGUAGE(NODE)				\
  (DECL_LANG_SPECIFIC (NODE)				\
   ? DECL_LANG_SPECIFIC (NODE)->u.base.language		\
   : (TREE_CODE (NODE) == FUNCTION_DECL			\
      ? lang_c : lang_cplusplus))

/* Set the language linkage for NODE to LANGUAGE.  */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
  (DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))

/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
   is a constructor.  */
#define DECL_CONSTRUCTOR_P(NODE) \
  DECL_CXX_CONSTRUCTOR_P (STRIP_TEMPLATE (NODE))

/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
   object.  */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE)		\
  (DECL_NAME (NODE) == complete_ctor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
   object.  */
#define DECL_BASE_CONSTRUCTOR_P(NODE)		\
  (DECL_NAME (NODE) == base_ctor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
   specialized in-charge constructor or the specialized not-in-charge
   constructor.  */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE)	\
  (DECL_NAME (NODE) == ctor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor.  */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)

/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor.  */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))

/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
   is a destructor.  */
#define DECL_DESTRUCTOR_P(NODE)	\
  DECL_CXX_DESTRUCTOR_P (STRIP_TEMPLATE (NODE))

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
   specialized in-charge constructor, in-charge deleting constructor,
   or the base destructor.  */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE)			\
  (DECL_NAME (NODE) == dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
   object.  */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE)		\
  (DECL_NAME (NODE) == complete_dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
   object.  */
#define DECL_BASE_DESTRUCTOR_P(NODE)		\
  (DECL_NAME (NODE) == base_dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
   object that deletes the object after it has been destroyed.  */
#define DECL_DELETING_DESTRUCTOR_P(NODE)		\
  (DECL_NAME (NODE) == deleting_dtor_identifier)

/* Nonzero if either DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P or
   DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P is true of NODE.  */
#define DECL_MAYBE_IN_CHARGE_CDTOR_P(NODE)		\
  (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (NODE)		\
   || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (NODE))

/* Nonzero if NODE (a _DECL) is a cloned constructor or
   destructor.  */
#define DECL_CLONED_FUNCTION_P(NODE)		\
  (DECL_NAME (NODE)				\
   && IDENTIFIER_CDTOR_P (DECL_NAME (NODE))	\
   && !DECL_MAYBE_IN_CHARGE_CDTOR_P (NODE))

/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
   cloned.  */
#define DECL_CLONED_FUNCTION(NODE) \
  (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE))->u.fn.u5.cloned_function)

/* Perform an action for each clone of FN, if FN is a function with
   clones.  This macro should be used like:

      FOR_EACH_CLONE (clone, fn)
	{ ... }

  */
#define FOR_EACH_CLONE(CLONE, FN)			\
  if (!(TREE_CODE (FN) == FUNCTION_DECL			\
	&& DECL_MAYBE_IN_CHARGE_CDTOR_P (FN)))		\
    ;							\
  else							\
    for (CLONE = DECL_CHAIN (FN);			\
	 CLONE && DECL_CLONED_FUNCTION_P (CLONE);	\
	 CLONE = DECL_CHAIN (CLONE))

/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS.  */
#define DECL_DISCRIMINATOR_P(NODE)				\
  (((TREE_CODE (NODE) == VAR_DECL && TREE_STATIC (NODE))	\
    || DECL_IMPLICIT_TYPEDEF_P (NODE))				\
   && DECL_FUNCTION_SCOPE_P (NODE))

/* Discriminator for name mangling.  */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_MIN_CHECK (NODE)->access)

/* The index of a user-declared parameter in its function, starting at 1.
   All artificial parameters will have index 0.  */
#define DECL_PARM_INDEX(NODE) \
  (LANG_DECL_PARM_CHECK (NODE)->index)

/* The level of a user-declared parameter in its function, starting at 1.
   A parameter of the function will have level 1; a parameter of the first
   nested function declarator (i.e. t in void f (void (*p)(T t))) will have
   level 2.  */
#define DECL_PARM_LEVEL(NODE) \
  (LANG_DECL_PARM_CHECK (NODE)->level)

/* Nonzero if the VTT parm has been added to NODE.  */
#define DECL_HAS_VTT_PARM_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)

/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
   required.  */
#define DECL_NEEDS_VTT_PARM_P(NODE)			\
  (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE))		\
   && (DECL_BASE_CONSTRUCTOR_P (NODE)			\
       || DECL_BASE_DESTRUCTOR_P (NODE)))

/* Nonzero if NODE is a user-defined conversion operator.  */
#define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE))

/* The type to which conversion operator FN converts.  */
#define DECL_CONV_FN_TYPE(FN) \
  TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN)))

/* Nonzero if NODE, a static data member, was declared in its class as an
   array of unknown bound.  */
#define VAR_HAD_UNKNOWN_BOUND(NODE)			\
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))		\
   ? DECL_LANG_SPECIFIC (NODE)->u.base.unknown_bound_p	\
   : false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.unknown_bound_p = true)

/* True iff decl NODE is for an overloaded operator.  */
#define DECL_OVERLOADED_OPERATOR_P(NODE)		\
  IDENTIFIER_ANY_OP_P (DECL_NAME (NODE))

/* Nonzero if NODE is an assignment operator (including += and such).  */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE)		 \
  IDENTIFIER_ASSIGN_OP_P (DECL_NAME (NODE))

/* NODE is a function_decl for an overloaded operator.  Return its
   compressed (raw) operator code.  Note that this is not a TREE_CODE.  */
#define DECL_OVERLOADED_OPERATOR_CODE_RAW(NODE)		\
  (LANG_DECL_FN_CHECK (NODE)->ovl_op_code)

/* DECL is an overloaded operator.  Test whether it is for TREE_CODE
   (a literal constant).  */
#define DECL_OVERLOADED_OPERATOR_IS(DECL, CODE)			\
  (DECL_OVERLOADED_OPERATOR_CODE_RAW (DECL) == OVL_OP_##CODE)

/* For FUNCTION_DECLs: nonzero means that this function is a
   constructor or a destructor with an extra in-charge parameter to
   control whether or not virtual bases are constructed.  */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)

/* Nonzero if DECL is a declaration of __builtin_constant_p.  */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE)		\
 (TREE_CODE (NODE) == FUNCTION_DECL			\
  && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL	\
  && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)

/* Nonzero for _DECL means that this decl appears in (or will appear
   in) as a member in a RECORD_TYPE or UNION_TYPE node.  It is also for
   detecting circularity in case members are multiply defined.  In the
   case of a VAR_DECL, it means that no definition has been seen, even
   if an initializer has been.  */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))

/* Nonzero for a VAR_DECL means that the variable's initialization (if
   any) has been processed.  (In general, DECL_INITIALIZED_P is
   !DECL_EXTERNAL, but static data members may be initialized even if
   not defined.)  */
#define DECL_INITIALIZED_P(NODE) \
   (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))

/* Nonzero for a VAR_DECL iff an explicit initializer was provided
   or a non-trivial constructor is called.
*/
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE)	\
   (TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE)))

/* Nonzero for a VAR_DECL that was initialized with a
   constant-expression.  */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
  (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))

/* Nonzero if the DECL was initialized in the class definition itself,
   rather than outside the class.  This is used for both static member
   VAR_DECLS, and FUNCTION_DECLS that are defined in the class.  */
#define DECL_INITIALIZED_IN_CLASS_P(DECL)		\
  (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL))	\
   ->u.base.initialized_in_class)

/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
   Only available for decls with DECL_LANG_SPECIFIC.  */
#define DECL_ODR_USED(DECL)		\
  (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL))	\
   ->u.base.odr_used)

/* Nonzero for DECL means that this decl is just a friend declaration,
   and should not be added to the list of members for this class.  */
#define DECL_FRIEND_P(NODE) \
  (DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
   ->u.base.friend_or_tls)

/* Nonzero if the thread-local variable was declared with __thread as
   opposed to thread_local.  */
#define DECL_GNU_TLS_P(NODE)				\
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))		\
   && DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls)
#define SET_DECL_GNU_TLS_P(NODE)				\
  (retrofit_lang_decl (VAR_DECL_CHECK (NODE)),			\
   DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true)

/* A TREE_LIST of the types which have befriended this FUNCTION_DECL.  */
#define DECL_BEFRIENDING_CLASSES(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->befriending_classes)

/* Nonzero for FUNCTION_DECL means that this decl is a static
   member function.  */
#define DECL_STATIC_FUNCTION_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->static_function)

/* Nonzero for FUNCTION_DECL means that this decl is a non-static
   member function.  */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
  (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)

/* Nonzero for FUNCTION_DECL means that this decl is a member function
   (static or non-static).  */
#define DECL_FUNCTION_MEMBER_P(NODE) \
  (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))

/* Nonzero for FUNCTION_DECL means that this member function
   has `this' as const X *const.  */
#define DECL_CONST_MEMFUNC_P(NODE)					 \
  (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE)				 \
   && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE				 \
				  (TYPE_ARG_TYPES (TREE_TYPE (NODE))))))

/* Nonzero for FUNCTION_DECL means that this member function
   has `this' as volatile X *const.  */
#define DECL_VOLATILE_MEMFUNC_P(NODE)					 \
  (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE)				 \
   && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE				 \
				  (TYPE_ARG_TYPES (TREE_TYPE (NODE))))))

/* Nonzero for a DECL means that this member is a non-static member.  */
#define DECL_NONSTATIC_MEMBER_P(NODE)		\
  (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE)	\
   || TREE_CODE (NODE) == FIELD_DECL)

/* Nonzero for a FIELD_DECL means that this member object type
   is mutable.  */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (FIELD_DECL_CHECK (NODE)))

/* Nonzero for _DECL means that this constructor or conversion function is
   non-converting.  */
#define DECL_NONCONVERTING_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->nonconverting)

/* Nonzero for FUNCTION_DECL means that this member function is a pure
   virtual function.  */
#define DECL_PURE_VIRTUAL_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->pure_virtual)

/* Nonzero for FUNCTION_DECL means that this member function (either
   a constructor or a conversion function) has an explicit specifier
   with a value-dependent expression.  */
#define DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->has_dependent_explicit_spec_p)

/* Nonzero for a defaulted FUNCTION_DECL for which we haven't decided yet if
   it's deleted; we will decide in synthesize_method.  */
#define DECL_MAYBE_DELETED(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->maybe_deleted)

/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
   invalid overrider for a function from a base class.  Once we have
   complained about an invalid overrider we avoid complaining about it
   again.  */
#define DECL_INVALID_OVERRIDER_P(NODE) \
  (DECL_LANG_FLAG_4 (NODE))

/* True (in a FUNCTION_DECL) if NODE is a function declared with
   an override virt-specifier.  */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))

/* The thunks associated with NODE, a FUNCTION_DECL.  */
#define DECL_THUNKS(NODE) \
  (DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)

/* Set DECL_THUNKS.  */
#define SET_DECL_THUNKS(NODE,THUNKS) \
  (LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))

/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
   is the constructor it inherits from.  */
#define DECL_INHERITED_CTOR(NODE) \
  (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
   ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)

/* And this is the base that constructor comes from.  */
#define DECL_INHERITED_CTOR_BASE(NODE)			\
  (DECL_INHERITED_CTOR (NODE)				\
   ? DECL_CONTEXT (flag_new_inheriting_ctors		\
		   ? strip_inheriting_ctors (NODE)	\
		   : DECL_INHERITED_CTOR (NODE))	\
   : NULL_TREE)

/* Set the inherited base.  */
#define SET_DECL_INHERITED_CTOR(NODE,INH) \
  (LANG_DECL_FN_CHECK (NODE)->context = (INH))

/* Nonzero if NODE is a thunk, rather than an ordinary function.  */
#define DECL_THUNK_P(NODE)			\
  (TREE_CODE (NODE) == FUNCTION_DECL		\
   && DECL_LANG_SPECIFIC (NODE)			\
   && LANG_DECL_FN_CHECK (NODE)->thunk_p)

/* Set DECL_THUNK_P for node.  */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING)			\
  (LANG_DECL_FN_CHECK (NODE)->thunk_p = 1,			\
   LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))

/* Nonzero if NODE is a this pointer adjusting thunk.  */
#define DECL_THIS_THUNK_P(NODE)			\
  (DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)

/* Nonzero if NODE is a result pointer adjusting thunk.  */
#define DECL_RESULT_THUNK_P(NODE)			\
  (DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)

/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk.  */
#define DECL_NON_THUNK_FUNCTION_P(NODE)				\
  (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))

/* Nonzero if NODE is `extern "C"'.  */
#define DECL_EXTERN_C_P(NODE) \
  (DECL_LANGUAGE (NODE) == lang_c)

/* Nonzero if NODE is an `extern "C"' function.  */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
  (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))

/* True if DECL is declared 'constexpr'.  */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
  DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))

/* True if FNDECL is an immediate function.  */
#define DECL_IMMEDIATE_FUNCTION_P(NODE) \
  (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (STRIP_TEMPLATE (NODE)))	\
   ? LANG_DECL_FN_CHECK (NODE)->immediate_fn_p				\
   : false)
#define SET_DECL_IMMEDIATE_FUNCTION_P(NODE) \
  (retrofit_lang_decl (FUNCTION_DECL_CHECK (NODE)),			\
   LANG_DECL_FN_CHECK (NODE)->immediate_fn_p = true)

// True if NODE was declared as 'concept'.  The flag implies that the
// declaration is constexpr, that the declaration cannot be specialized or
// refined, and that the result type must be convertible to bool.
#define DECL_DECLARED_CONCEPT_P(NODE) \
  (DECL_LANG_SPECIFIC (NODE)->u.base.concept_p)

/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
   template function.  */
#define DECL_PRETTY_FUNCTION_P(NODE) \
  (DECL_NAME (NODE) \
   && id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__"))

/* For a DECL, true if it is __func__ or similar.
*/ #define DECL_FNAME_P(NODE) \ (VAR_P (NODE) && DECL_NAME (NODE) && DECL_ARTIFICIAL (NODE) \ && DECL_HAS_VALUE_EXPR_P (NODE) \ && (id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__") \ || id_equal (DECL_NAME (NODE), "__FUNCTION__") \ || id_equal (DECL_NAME (NODE), "__func__"))) /* Nonzero if the variable was declared to be thread-local. We need a special C++ version of this test because the middle-end DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for templates. */ #define CP_DECL_THREAD_LOCAL_P(NODE) \ (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))) /* The _TYPE context in which this _DECL appears. This field holds the class where a virtual function instance is actually defined. */ #define DECL_CLASS_CONTEXT(NODE) \ (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE) /* For a non-member friend function, the class (if any) in which this friend was defined. For example, given: struct S { friend void f () { ... } }; the DECL_FRIEND_CONTEXT for `f' will be `S'. */ #define DECL_FRIEND_CONTEXT(NODE) \ ((DECL_DECLARES_FUNCTION_P (NODE) \ && DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \ ? LANG_DECL_FN_CHECK (NODE)->context \ : NULL_TREE) /* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */ #define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \ (LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT)) #define CP_DECL_CONTEXT(NODE) \ (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace) #define CP_TYPE_CONTEXT(NODE) \ (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace) #define FROB_CONTEXT(NODE) \ ((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE)) /* 1 iff NODE has namespace scope, including the global namespace. 
*/ #define DECL_NAMESPACE_SCOPE_P(NODE) \ (!DECL_TEMPLATE_PARM_P (NODE) \ && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL) #define TYPE_NAMESPACE_SCOPE_P(NODE) \ (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL) #define NAMESPACE_SCOPE_P(NODE) \ ((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \ || (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE))) /* 1 iff NODE is a class member. */ #define DECL_CLASS_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE))) #define TYPE_CLASS_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE))) /* 1 iff NODE is function-local. */ #define DECL_FUNCTION_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) \ && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL) #define TYPE_FUNCTION_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL) /* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for both the primary typeinfo object and the associated NTBS name. */ #define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)) /* 1 iff VAR_DECL node NODE is virtual table or VTT. We forward to DECL_VIRTUAL_P from the common code, as that has the semantics we need. But we want a more descriptive name. */ #define DECL_VTABLE_OR_VTT_P(NODE) DECL_VIRTUAL_P (VAR_DECL_CHECK (NODE)) /* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */ #define FUNCTION_REF_QUALIFIED(NODE) \ TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE)) /* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */ #define FUNCTION_RVALUE_QUALIFIED(NODE) \ TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE)) /* 1 iff NODE is function-local, but for types. */ #define LOCAL_CLASS_P(NODE) \ (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE) /* The nesting depth of namespace, class or function. Makes is_ancestor much simpler. Only 8 bits available. 
 */
#define SCOPE_DEPTH(NODE) \
  (NAMESPACE_DECL_CHECK (NODE)->base.u.bits.address_space)

/* Whether the namespace is an inline namespace.  */
#define DECL_NAMESPACE_INLINE_P(NODE) \
  TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE))

/* In a NAMESPACE_DECL, a vector of inline namespaces.  */
#define DECL_NAMESPACE_INLINEES(NODE) \
  (LANG_DECL_NS_CHECK (NODE)->inlinees)

/* Pointer to hash_map from IDENTIFIERS to DECLS  */
#define DECL_NAMESPACE_BINDINGS(NODE) \
  (LANG_DECL_NS_CHECK (NODE)->bindings)

/* In a NAMESPACE_DECL, points to the original namespace if this is
   a namespace alias.  */
#define DECL_NAMESPACE_ALIAS(NODE) \
  DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE) \
  (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))

/* Nonzero if NODE is the std namespace.  */
#define DECL_NAMESPACE_STD_P(NODE) \
  ((NODE) == std_node)

/* In a TREE_LIST in an attribute list, indicates that the attribute
   must be applied at instantiation time.  */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))

/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
   was inherited from a template parameter, not explicitly indicated.  */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))

/* Nonzero if this is a using decl for a dependent scope.  */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))

/* The scope named in a using decl.  */
#define USING_DECL_SCOPE(NODE) DECL_RESULT_FLD (USING_DECL_CHECK (NODE))

/* The decls named by a using decl.  */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))

/* Nonzero if the using decl refers to a dependent type.  */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))

/* In a FUNCTION_DECL, this is nonzero if this function was defined in
   the class definition.  We have saved away the text of the function,
   but have not yet processed it.
 */
#define DECL_PENDING_INLINE_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->pending_inline_p)

/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
   function.  */
#define DECL_PENDING_INLINE_INFO(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)

/* Nonzero for TYPE_DECL means that it was written 'using name = type'.  */
#define TYPE_DECL_ALIAS_P(NODE) \
  DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))

/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template.  */
#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
  DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))

/* Nonzero for a type which is an alias for another type; i.e., a type
   whose declaration was written
   'using name-of-type = another-type'.  */
#define TYPE_ALIAS_P(NODE)			\
  (TYPE_P (NODE)				\
   && TYPE_NAME (NODE)				\
   && TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL	\
   && TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))

/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL, TEMPLATE_DECL,
   or CONCEPT_DECL, the entity is either a template specialization (if
   DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
   template itself.

   In either case, DECL_TEMPLATE_INFO is a TEMPLATE_INFO, whose
   TI_TEMPLATE is the TEMPLATE_DECL of which this entity is a
   specialization or abstract instance.  The TI_ARGS is the template
   arguments used to specialize the template.

   Consider:

      template <typename T> struct S { friend void f(T) {} };

   In this case, S<int>::f is, from the point of view of the compiler,
   an instantiation of a template -- but, from the point of view of
   the language, each instantiation of S results in a wholly unrelated
   global function f.  In this case, DECL_TEMPLATE_INFO for S<int>::f
   will be non-NULL, but DECL_USE_TEMPLATE will be zero.  */
#define DECL_TEMPLATE_INFO(NODE) \
  (DECL_LANG_SPECIFIC (TEMPLATE_INFO_DECL_CHECK (NODE)) \
   ->u.min.template_info)

/* For a lambda capture proxy, its captured variable.
*/ #define DECL_CAPTURED_VARIABLE(NODE) \ (LANG_DECL_MIN_CHECK (NODE)->access) /* For a VAR_DECL, indicates that the variable is actually a non-static data member of anonymous union that has been promoted to variable status. */ #define DECL_ANON_UNION_VAR_P(NODE) \ (DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))) /* Template information for a RECORD_TYPE or UNION_TYPE. */ #define CLASSTYPE_TEMPLATE_INFO(NODE) \ (TYPE_LANG_SLOT_1 (RECORD_OR_UNION_CHECK (NODE))) /* Template information for a template template parameter. */ #define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \ (TYPE_LANG_SLOT_1 (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE))) /* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or BOUND_TEMPLATE_TEMPLATE_PARM type. This ignores any alias templateness of NODE. It'd be nice if this could unconditionally access the slot, rather than return NULL if given a non-templatable type. */ #define TYPE_TEMPLATE_INFO(NODE) \ (TREE_CODE (NODE) == ENUMERAL_TYPE \ || TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \ || RECORD_OR_UNION_TYPE_P (NODE) \ ? TYPE_LANG_SLOT_1 (NODE) : NULL_TREE) /* Template information (if any) for an alias type. */ #define TYPE_ALIAS_TEMPLATE_INFO(NODE) \ (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \ ? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \ : NULL_TREE) /* If NODE is a type alias, this accessor returns the template info for the alias template (if any). Otherwise behave as TYPE_TEMPLATE_INFO. */ #define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \ (typedef_variant_p (NODE) \ ? TYPE_ALIAS_TEMPLATE_INFO (NODE) \ : TYPE_TEMPLATE_INFO (NODE)) /* Set the template information for an ENUMERAL_, RECORD_, or UNION_TYPE to VAL. */ #define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \ (TREE_CODE (NODE) == ENUMERAL_TYPE \ || (CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \ ? 
(TYPE_LANG_SLOT_1 (NODE) = (VAL)) \ : (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))) #define TI_TEMPLATE(NODE) \ ((struct tree_template_info*)TEMPLATE_INFO_CHECK (NODE))->tmpl #define TI_ARGS(NODE) \ ((struct tree_template_info*)TEMPLATE_INFO_CHECK (NODE))->args #define TI_PENDING_TEMPLATE_FLAG(NODE) \ TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)) /* For a given TREE_VEC containing a template argument list, this property contains the number of arguments that are not defaulted. */ #define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ TREE_CHAIN (TREE_VEC_CHECK (NODE)) /* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT property. */ #define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \ NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE) #if CHECKING_P #define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) #else #define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \ ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \ : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE)) #endif /* The list of typedefs - used in the template - that need access checking at template instantiation time. FIXME this should be associated with the TEMPLATE_DECL, not the TEMPLATE_INFO. */ #define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \ ((struct tree_template_info*)TEMPLATE_INFO_CHECK \ (NODE))->typedefs_needing_access_checking /* We use TREE_VECs to hold template arguments. If there is only one level of template arguments, then the TREE_VEC contains the arguments directly. If there is more than one level of template arguments, then each entry in the TREE_VEC is itself a TREE_VEC, containing the template arguments for a single level. The first entry in the outer TREE_VEC is the outermost level of template parameters; the last is the innermost. 
   It is incorrect to ever form a template argument vector containing
   only one level of arguments, but which is a TREE_VEC containing as
   its only entry the TREE_VEC for that level.

   For each TREE_VEC containing the template arguments for a single
   level, it's possible to get or set the number of non defaulted
   template arguments by using the accessor macros
   GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
   SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT.  */

/* Nonzero if the template argument vector NODE is actually a vector
   of vectors, rather than just a vector.  */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE)		     \
  (NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0)  \
   && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)

/* The depth of a template argument vector.  When called directly by
   the parser, we use a TREE_LIST rather than a TREE_VEC to represent
   template arguments.  In fact, we may even see NULL_TREE if there
   are no template arguments.  In both of those cases, there is only
   one level of template arguments.  */
#define TMPL_ARGS_DEPTH(NODE)					\
  (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)

/* The LEVELth level of the template ARGS.  The outermost level of
   args is level 1, not level 0.  */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL)		\
  (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS)	\
   ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))

/* Set the LEVELth level of the template ARGS to VAL.  This macro does
   not work with single-level argument vectors.  */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL)	\
  (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))

/* Accesses the IDXth parameter in the LEVELth level of the ARGS.  */
#define TMPL_ARG(ARGS, LEVEL, IDX)				\
  (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))

/* Given a single level of template arguments in NODE, return the
   number of arguments.  */
#define NUM_TMPL_ARGS(NODE)				\
  (TREE_VEC_LENGTH (NODE))

/* Returns the innermost level of template arguments in ARGS.
 */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
  (get_innermost_template_args ((NODE), 1))

/* The number of levels of template parameters given by NODE.  */
#define TMPL_PARMS_DEPTH(NODE) \
  ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))

/* The TEMPLATE_DECL instantiated or specialized by NODE.  This
   TEMPLATE_DECL will be the immediate parent, not the most general
   template.  For example, in:

      template <class T> struct S { template <class U> void f(U); }

   the FUNCTION_DECL for S<int>::f<double> will have, as its
   DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.

   As a special case, for a member friend template of a template
   class, this value will not be a TEMPLATE_DECL, but rather an
   IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
   any explicit template arguments provided.  For example, in:

     template <class T> struct S { friend void f<int>(int, double); }

   the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
   DECL_TI_ARGS will be {int}.

   For a FIELD_DECL with a non-static data member initializer, this value
   is the FIELD_DECL it was instantiated from.  */
#define DECL_TI_TEMPLATE(NODE)      TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))

/* The template arguments used to obtain this decl from the most
   general form of DECL_TI_TEMPLATE.  For the example given for
   DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}.  These
   are always the full set of arguments required to instantiate this
   declaration from the most general template specialized here.  */
#define DECL_TI_ARGS(NODE)          TI_ARGS (DECL_TEMPLATE_INFO (NODE))

/* The TEMPLATE_DECL associated with NODE, a class type.  Even if NODE
   will be generated from a partial specialization, the TEMPLATE_DECL
   referred to here will be the original template.  For example, given:

      template <typename T> struct S {};
      template <typename T> struct S<T*> {};

   the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>.
   For a member class template, CLASSTYPE_TI_TEMPLATE always refers to
   the partial instantiation rather than the primary template.
   CLASSTYPE_TI_ARGS are for the primary template if the partial
   instantiation isn't specialized, or for the explicit specialization if
   it is, e.g.

      template <class T> class C { template <class U> class D; }
      template <> template <class U> class C<int>::D;  */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE)     TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))

/* For a template instantiation TYPE, returns the TYPE corresponding
   to the primary template.  Otherwise returns TYPE itself.  */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE)				\
  ((CLASSTYPE_USE_TEMPLATE ((TYPE))					\
    && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE)))			\
   ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE		\
				      (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
   : (TYPE))

/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs.  */
#define TYPE_TI_TEMPLATE(NODE)			\
  (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))

/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE.  */
#define TYPE_TI_ARGS(NODE)			\
  (TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))

#define INNERMOST_TEMPLATE_PARMS(NODE)  TREE_VALUE (NODE)

/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
   sense of [temp.mem].  */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
  (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))

/* Nonzero if the NODE corresponds to the template parameters for a
   member template, whose inline definition is being processed after
   the class definition is complete.  */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)

/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack.  */
#define DECL_PACK_P(NODE) \
  (DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))

/* Determines if NODE is an expansion of one or more parameter packs,
   e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION.
*/ #define PACK_EXPANSION_P(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ || TREE_CODE (NODE) == EXPR_PACK_EXPANSION) /* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_PATTERN(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION ? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* The list of parameter packs used in the PACK_EXPANSION_* node. The TREE_VALUE of each TREE_LIST contains the parameter packs. */ #define PACK_EXPANSION_PARAMETER_PACKS(NODE) \ *(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \ ? &TREE_OPERAND (NODE, 1) \ : &TYPE_MIN_VALUE_RAW (TYPE_PACK_EXPANSION_CHECK (NODE))) /* Any additional template args to be applied when substituting into the pattern, set by tsubst_pack_expansion for partial instantiations. If this is a TREE_LIST, the TREE_VALUE of the first element is the usual template argument TREE_VEC, and the TREE_PURPOSE of later elements are enclosing functions that provided function parameter packs we'll need to map appropriately. */ #define PACK_EXPANSION_EXTRA_ARGS(NODE) \ *(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ ? &TYPE_MAX_VALUE_RAW (NODE) \ : &TREE_OPERAND ((NODE), 2)) /* True iff this pack expansion is within a function context. */ #define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE) /* True iff this pack expansion is for sizeof.... */ #define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE) /* True iff the wildcard can match a template parameter pack. */ #define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE) /* Determine if this is an argument pack. 
*/ #define ARGUMENT_PACK_P(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \ || TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK) /* The arguments stored in an argument pack. Arguments are stored in a TREE_VEC, which may have length zero. */ #define ARGUMENT_PACK_ARGS(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Set the arguments stored in an argument pack. VALUE must be a TREE_VEC. */ #define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* Whether the argument pack is "incomplete", meaning that more arguments can still be deduced. Incomplete argument packs are only used when the user has provided an explicit template argument list for a variadic function template. Some of the explicit template arguments will be placed into the beginning of the argument pack, but additional arguments might still be deduced. */ #define ARGUMENT_PACK_INCOMPLETE_P(NODE) \ TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE)) /* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template arguments used to fill this pack. */ #define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \ TREE_TYPE (ARGUMENT_PACK_ARGS (NODE)) /* In an ARGUMENT_PACK_SELECT, the argument pack from which an argument will be selected. */ #define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack) /* In an ARGUMENT_PACK_SELECT, the index of the argument we want to select. 
*/ #define ARGUMENT_PACK_SELECT_INDEX(NODE) \ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index) #define FOLD_EXPR_CHECK(NODE) \ TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \ BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR) #define BINARY_FOLD_EXPR_CHECK(NODE) \ TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR) /* True if NODE is UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR */ #define FOLD_EXPR_P(NODE) \ (TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \ || TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \ || TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \ || TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR) /* True when NODE is a fold over a compound assignment operator. */ #define FOLD_EXPR_MODIFY_P(NODE) \ TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE)) /* An INTEGER_CST containing the tree code of the folded operator. */ #define FOLD_EXPR_OP(NODE) \ TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0) /* The expression containing an unexpanded parameter pack. */ #define FOLD_EXPR_PACK(NODE) \ TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1) /* In a binary fold expression, the argument with no unexpanded parameter packs. */ #define FOLD_EXPR_INIT(NODE) \ TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2) /* In a FUNCTION_DECL, the saved auto-return pattern. */ #define DECL_SAVED_AUTO_RETURN_TYPE(NODE) \ (LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \ ->u.saved_auto_return_type) /* True if NODE is an implicit INDIRECT_REF from convert_from_reference. 
*/ #define REFERENCE_REF_P(NODE) \ (INDIRECT_REF_P (NODE) \ && TREE_TYPE (TREE_OPERAND (NODE, 0)) \ && TYPE_REF_P (TREE_TYPE (TREE_OPERAND ((NODE), 0)))) #define NEW_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_VEC(NODE) \ TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE)) #define CALL_OR_AGGR_INIT_CHECK(NODE) \ TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR) /* Indicates that this is a non-dependent COMPOUND_EXPR which will resolve to a function call. */ #define COMPOUND_EXPR_OVERLOADED(NODE) \ TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE)) /* In a CALL_EXPR appearing in a template, true if Koenig lookup should be performed at instantiation time. */ #define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE)) /* In a CALL_EXPR, true for allocator calls from new or delete expressions. */ #define CALL_FROM_NEW_OR_DELETE_P(NODE) \ TREE_LANG_FLAG_2 (CALL_EXPR_CHECK (NODE)) /* True if the arguments to NODE should be evaluated in left-to-right order regardless of PUSH_ARGS_REVERSED. */ #define CALL_EXPR_ORDERED_ARGS(NODE) \ TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE)) /* True if the arguments to NODE should be evaluated in right-to-left order regardless of PUSH_ARGS_REVERSED. */ #define CALL_EXPR_REVERSE_ARGS(NODE) \ TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE)) /* True if CALL_EXPR was written as an operator expression, not a function call. */ #define CALL_EXPR_OPERATOR_SYNTAX(NODE) \ TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE)) /* Indicates whether a string literal has been parenthesized. Such usages are disallowed in certain circumstances. */ #define PAREN_STRING_LITERAL_P(NODE) \ TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE)) /* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, or an INDIRECT_REF comes from parenthesizing a _DECL. Currently only set some of the time in C++14 mode. 
*/ #define REF_PARENTHESIZED_P(NODE) \ TREE_LANG_FLAG_2 (TREE_CHECK4 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF, VIEW_CONVERT_EXPR)) /* Nonzero if this AGGR_INIT_EXPR provides for initialization via a constructor call, rather than an ordinary function call. */ #define AGGR_INIT_VIA_CTOR_P(NODE) \ TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE)) /* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize the object. */ #define AGGR_INIT_ZERO_FIRST(NODE) \ TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE)) /* Nonzero means that the call is the jump from a thunk to the thunked-to function. */ #define AGGR_INIT_FROM_THUNK_P(NODE) \ (AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag) /* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of CALL_EXPR_STATIC_CHAIN). */ #define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1) #define AGGR_INIT_EXPR_SLOT(NODE) \ TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2) #define AGGR_INIT_EXPR_ARG(NODE, I) \ TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3) #define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3) /* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE. We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if the argument count is zero when checking is enabled. Instead, do the pointer arithmetic to advance past the 3 fixed operands in a AGGR_INIT_EXPR. That produces a valid pointer to just past the end of the operand array, even if it's not valid to dereference it. */ #define AGGR_INIT_EXPR_ARGP(NODE) \ (&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3) /* Abstract iterators for AGGR_INIT_EXPRs. */ /* Structure containing iterator state. 
*/ struct aggr_init_expr_arg_iterator { tree t; /* the aggr_init_expr */ int n; /* argument count */ int i; /* next argument index */ }; /* Initialize the abstract argument list iterator object ITER with the arguments from AGGR_INIT_EXPR node EXP. */ inline void init_aggr_init_expr_arg_iterator (tree exp, aggr_init_expr_arg_iterator *iter) { iter->t = exp; iter->n = aggr_init_expr_nargs (exp); iter->i = 0; } /* Return the next argument from abstract argument list iterator object ITER, and advance its state. Return NULL_TREE if there are no more arguments. */ inline tree next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter) { tree result; if (iter->i >= iter->n) return NULL_TREE; result = AGGR_INIT_EXPR_ARG (iter->t, iter->i); iter->i++; return result; } /* Initialize the abstract argument list iterator object ITER, then advance past and return the first argument. Useful in for expressions, e.g. for (arg = first_aggr_init_expr_arg (exp, &iter); arg; arg = next_aggr_init_expr_arg (&iter)) */ inline tree first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter) { init_aggr_init_expr_arg_iterator (exp, iter); return next_aggr_init_expr_arg (iter); } /* Test whether there are more arguments in abstract argument list iterator ITER, without changing its state. */ inline bool more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter) { return (iter->i < iter->n); } /* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */ #define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \ for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \ (arg) = next_aggr_init_expr_arg (&(iter))) /* VEC_INIT_EXPR accessors. */ #define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0) #define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1) /* Indicates that a VEC_INIT_EXPR is a potential constant expression. 
Only set when the current function is constexpr. */ #define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \ TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE)) /* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */ #define VEC_INIT_EXPR_VALUE_INIT(NODE) \ TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE)) /* The condition under which this MUST_NOT_THROW_EXPR actually blocks exceptions. NULL_TREE means 'true'. */ #define MUST_NOT_THROW_COND(NODE) \ TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1) /* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a TEMPLATE_DECL. This macro determines whether or not a given class type is really a template type, as opposed to an instantiation or specialization of one. */ #define CLASSTYPE_IS_TEMPLATE(NODE) \ (CLASSTYPE_TEMPLATE_INFO (NODE) \ && !CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) /* The name used by the user to name the typename type. Typically, this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the corresponding TYPE_DECL. However, this may also be a TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */ #define TYPENAME_TYPE_FULLNAME(NODE) \ (TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE was declared as an "enum". */ #define TYPENAME_IS_ENUM_P(NODE) \ (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE was declared as a "class", "struct", or "union". */ #define TYPENAME_IS_CLASS_P(NODE) \ (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE is in the process of being resolved. */ #define TYPENAME_IS_RESOLVING_P(NODE) \ (TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE))) /* [class.virtual] A class that declares or inherits a virtual function is called a polymorphic class. */ #define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE)) /* Nonzero if this class has a virtual function table pointer. 
*/ #define TYPE_CONTAINS_VPTR_P(NODE) \ (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE)) /* Nonzero if NODE is a FUNCTION_DECL (for a function with global scope) declared in a local scope. */ #define DECL_LOCAL_FUNCTION_P(NODE) \ DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE)) /* Nonzero if NODE is the target for genericization of 'break' stmts. */ #define LABEL_DECL_BREAK(NODE) \ DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE)) /* Nonzero if NODE is the target for genericization of 'continue' stmts. */ #define LABEL_DECL_CONTINUE(NODE) \ DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE)) /* Nonzero if NODE is the target for genericization of 'return' stmts in constructors/destructors of targetm.cxx.cdtor_returns_this targets. */ #define LABEL_DECL_CDTOR(NODE) \ DECL_LANG_FLAG_2 (LABEL_DECL_CHECK (NODE)) /* True if NODE was declared with auto in its return type, but it has started compilation and so the return type might have been changed by return type deduction; its declared return type should be found in DECL_SAVED_AUTO_RETURN_TYPE (NODE). */ #define FNDECL_USED_AUTO(NODE) \ TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE)) /* Nonzero if NODE is a DECL which we know about but which has not been explicitly declared, such as a built-in function or a friend declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P will be set. */ #define DECL_ANTICIPATED(NODE) \ (DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \ ->u.base.anticipated_p) /* Is DECL NODE a hidden name? */ #define DECL_HIDDEN_P(NODE) \ (DECL_LANG_SPECIFIC (NODE) && TYPE_FUNCTION_OR_TEMPLATE_DECL_P (NODE) \ && DECL_ANTICIPATED (NODE)) /* True if this is a hidden class type. */ #define TYPE_HIDDEN_P(NODE) \ (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \ && DECL_ANTICIPATED (TYPE_NAME (NODE))) /* True for artificial decls added for OpenMP privatized non-static data members. 
*/ #define DECL_OMP_PRIVATIZED_MEMBER(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p) /* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend within a class but has not been declared in the surrounding scope. The function is invisible except via argument dependent lookup. */ #define DECL_HIDDEN_FRIEND_P(NODE) \ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p) /* Nonzero if NODE is an artificial FUNCTION_DECL for #pragma omp declare reduction. */ #define DECL_OMP_DECLARE_REDUCTION_P(NODE) \ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p) /* Nonzero if DECL has been declared threadprivate by #pragma omp threadprivate. */ #define CP_DECL_THREADPRIVATE_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p) /* Nonzero if NODE is a VAR_DECL which has been declared inline. */ #define DECL_VAR_DECLARED_INLINE_P(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p \ : false) #define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \ = true) /* True if NODE is a constant variable with a value-dependent initializer. */ #define DECL_DEPENDENT_INIT_P(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ && DECL_LANG_SPECIFIC (NODE)->u.base.dependent_init_p) #define SET_DECL_DEPENDENT_INIT_P(NODE, X) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.dependent_init_p = (X)) /* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding declaration or one of VAR_DECLs for the user identifiers in it. */ #define DECL_DECOMPOSITION_P(NODE) \ (VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp \ : false) /* The underlying artificial VAR_DECL for structured binding. */ #define DECL_DECOMP_BASE(NODE) \ (LANG_DECL_DECOMP_CHECK (NODE)->base) /* Nonzero if NODE is an inline VAR_DECL. 
In C++17, static data members declared with constexpr specifier are implicitly inline variables. */ #define DECL_INLINE_VAR_P(NODE) \ (DECL_VAR_DECLARED_INLINE_P (NODE) \ || (cxx_dialect >= cxx17 \ && DECL_DECLARED_CONSTEXPR_P (NODE) \ && DECL_CLASS_SCOPE_P (NODE))) /* Nonzero if DECL was declared with '= delete'. */ #define DECL_DELETED_FN(DECL) \ (LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p) /* Nonzero if DECL was declared with '= default' (maybe implicitly). */ #define DECL_DEFAULTED_FN(DECL) \ (LANG_DECL_FN_CHECK (DECL)->defaulted_p) /* Nonzero if DECL is explicitly defaulted in the class body. */ #define DECL_DEFAULTED_IN_CLASS_P(DECL) \ (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL)) /* Nonzero if DECL was defaulted outside the class body. */ #define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \ (DECL_DEFAULTED_FN (DECL) \ && !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL))) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* Returns nonzero if DECL has external linkage, as specified by the language standard. (This predicate may hold even when the corresponding entity is not actually given external linkage in the object file; see decl_linkage for details.) */ #define DECL_EXTERNAL_LINKAGE_P(DECL) \ (decl_linkage (DECL) == lk_external) /* Keep these codes in ascending code order. */ #define INTEGRAL_CODE_P(CODE) \ ((CODE) == ENUMERAL_TYPE \ || (CODE) == BOOLEAN_TYPE \ || (CODE) == INTEGER_TYPE) /* [basic.fundamental] Types bool, char, wchar_t, and the signed and unsigned integer types are collectively called integral types. Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration types as well, which is incorrect in C++. Keep these checks in ascending code order. 
*/ #define CP_INTEGRAL_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || TREE_CODE (TYPE) == INTEGER_TYPE) /* Returns true if TYPE is an integral or enumeration name. Keep these checks in ascending code order. */ #define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE)) /* Returns true if TYPE is an integral or unscoped enumeration type. */ #define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \ (UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE)) /* True if the class type TYPE is a literal type. */ #define CLASSTYPE_LITERAL_P(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal) /* [basic.fundamental] Integral and floating types are collectively called arithmetic types. As a GNU extension, we also accept complex types. Keep these checks in ascending code order. */ #define ARITHMETIC_TYPE_P(TYPE) \ (CP_INTEGRAL_TYPE_P (TYPE) \ || TREE_CODE (TYPE) == REAL_TYPE \ || TREE_CODE (TYPE) == COMPLEX_TYPE) /* True iff TYPE is cv decltype(nullptr). */ #define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE) /* [basic.types] Arithmetic types, enumeration types, pointer types, pointer-to-member types, and std::nullptr_t are collectively called scalar types. Keep these checks in ascending code order. */ #define SCALAR_TYPE_P(TYPE) \ (TYPE_PTRDATAMEM_P (TYPE) \ || TREE_CODE (TYPE) == ENUMERAL_TYPE \ || ARITHMETIC_TYPE_P (TYPE) \ || TYPE_PTR_P (TYPE) \ || TYPE_PTRMEMFUNC_P (TYPE) \ || NULLPTR_TYPE_P (TYPE)) /* Determines whether this type is a C++0x scoped enumeration type. Scoped enumerations types are introduced via "enum class" or "enum struct", e.g., enum class Color { Red, Green, Blue }; Scoped enumeration types are different from normal (unscoped) enumeration types in several ways: - The enumerators of a scoped enumeration type are only available within the scope of the enumeration type and not in the enclosing scope. For example, the Red color can be referred to with "Color::Red" but not "Red". 
- Scoped enumerators and enumerations do not implicitly convert to integers or 'bool'. - The underlying type of the enum is well-defined. */ #define SCOPED_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE)) /* Determine whether this is an unscoped enumeration type. */ #define UNSCOPED_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE)) /* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped enumeration type (1) or a normal (unscoped) enumeration type (0). */ #define SET_SCOPED_ENUM_P(TYPE, VAL) \ (ENUM_IS_SCOPED (TYPE) = (VAL)) #define SET_OPAQUE_ENUM_P(TYPE, VAL) \ (ENUM_IS_OPAQUE (TYPE) = (VAL)) #define OPAQUE_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE)) /* Determines whether an ENUMERAL_TYPE has an explicit underlying type. */ #define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE)) /* Returns the underlying type of the given enumeration type. The underlying type is determined in different ways, depending on the properties of the enum: - In C++0x, the underlying type can be explicitly specified, e.g., enum E1 : char { ... } // underlying type is char - In a C++0x scoped enumeration, the underlying type is int unless otherwise specified: enum class E2 { ... } // underlying type is int - Otherwise, the underlying type is determined based on the values of the enumerators. In this case, the ENUM_UNDERLYING_TYPE will not be set until after the definition of the enumeration is completed by finish_enum. */ #define ENUM_UNDERLYING_TYPE(TYPE) \ TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE)) /* [dcl.init.aggr] An aggregate is an array or a class with no user-provided constructors, no brace-or-equal-initializers for non-static data members, no private or protected non-static data members, no base classes, and no virtual functions. As an extension, we also treat vectors as aggregates. Keep these checks in ascending code order. 
*/ #define CP_AGGREGATE_TYPE_P(TYPE) \ (gnu_vector_type_p (TYPE) \ || TREE_CODE (TYPE) == ARRAY_TYPE \ || (CLASS_TYPE_P (TYPE) && COMPLETE_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE))) /* Nonzero for a class type means that the class type has a user-declared constructor. */ #define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE)) /* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a late-specified return type. */ #define TYPE_HAS_LATE_RETURN_TYPE(NODE) \ (TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE))) /* When appearing in an INDIRECT_REF, it means that the tree structure underneath is actually a call to a constructor. This is needed when the constructor must initialize local storage (which can be automatically destroyed), rather than allowing it to allocate space from the heap. When appearing in a SAVE_EXPR, it means that underneath is a call to a constructor. When appearing in a CONSTRUCTOR, the expression is a compound literal. When appearing in a FIELD_DECL, it means that this field has been duly initialized in its constructor. */ #define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE)) /* True if NODE is a brace-enclosed initializer. */ #define BRACE_ENCLOSED_INITIALIZER_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node) /* True if NODE is a compound-literal, i.e., a brace-enclosed initializer cast to a particular type. */ #define COMPOUND_LITERAL_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE)) #define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \ && vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\ && !TREE_HAS_CONSTRUCTOR (NODE)) /* True if NODE is a init-list used as a direct-initializer, i.e. B b{1,2}, not B b({1,2}) or B b = {1,2}. */ #define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE))) /* True if this CONSTRUCTOR is instantiation-dependent and needs to be substituted. 
*/ #define CONSTRUCTOR_IS_DEPENDENT(NODE) \ (TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE))) /* True if this CONSTRUCTOR should not be used as a variable initializer because it was loaded from a constexpr variable with mutable fields. */ #define CONSTRUCTOR_MUTABLE_POISON(NODE) \ (TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE))) /* True if this typed CONSTRUCTOR represents C99 compound-literal syntax rather than C++11 functional cast syntax. */ #define CONSTRUCTOR_C99_COMPOUND_LITERAL(NODE) \ (TREE_LANG_FLAG_3 (CONSTRUCTOR_CHECK (NODE))) /* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with CONSTRUCTOR_PLACEHOLDER_BOUNDARY. */ #define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \ (TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE))) #define DIRECT_LIST_INIT_P(NODE) \ (BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE)) /* True if this is a designated initializer (when we allow initializer-clauses mixed with designated-initializer-clauses set whenever there is at least one designated-initializer-clause), or a C99 designator. */ #define CONSTRUCTOR_IS_DESIGNATED_INIT(NODE) \ (TREE_LANG_FLAG_6 (CONSTRUCTOR_CHECK (NODE))) /* True if this CONSTRUCTOR comes from a parenthesized list of values, e.g. A(1, 2, 3). */ #define CONSTRUCTOR_IS_PAREN_INIT(NODE) \ (CONSTRUCTOR_CHECK(NODE)->base.private_flag) /* True if NODE represents a conversion for direct-initialization in a template. Set by perform_implicit_conversion_flags. */ #define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \ (TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE))) /* True if NODE represents a dependent conversion of a non-type template argument. Set by maybe_convert_nontype_argument. */ #define IMPLICIT_CONV_EXPR_NONTYPE_ARG(NODE) \ (TREE_LANG_FLAG_1 (IMPLICIT_CONV_EXPR_CHECK (NODE))) /* True if NODE represents a conversion for braced-init-list in a template. Set by perform_implicit_conversion_flags. 
*/ #define IMPLICIT_CONV_EXPR_BRACED_INIT(NODE) \ (TREE_LANG_FLAG_2 (IMPLICIT_CONV_EXPR_CHECK (NODE))) /* Nonzero means that an object of this type cannot be initialized using an initializer list. */ #define CLASSTYPE_NON_AGGREGATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate) #define TYPE_NON_AGGREGATE_CLASS(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE)) /* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign) /* Nonzero if there is a non-trivial X::X(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor) /* Nonzero if there is a non-trivial X::op=(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign) /* Nonzero if there is a non-trivial X::X(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor) /* Nonzero if there is no trivial default constructor for this class. */ #define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt) /* Nonzero if TYPE has a trivial destructor. From [class.dtor]: A destructor is trivial if it is an implicitly declared destructor and if: - all of the direct base classes of its class have trivial destructors, - for all of the non-static data members of its class that are of class type (or array thereof), each such class has a trivial destructor. */ #define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \ (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE)) /* Nonzero for _TYPE node means that this type does not have a trivial destructor. Therefore, destroying an object of this type will involve a call to a destructor. This can apply to objects of ARRAY_TYPE if the type of the elements needs a destructor. 
*/ #define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \ (TYPE_LANG_FLAG_4 (NODE)) /* Nonzero for class type means that the default constructor is trivial. */ #define TYPE_HAS_TRIVIAL_DFLT(NODE) \ (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE)) /* Nonzero for class type means that copy initialization of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \ (TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE)) /* Nonzero for class type means that assignment of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \ (TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE)) /* Returns true if NODE is a pointer-to-data-member. */ #define TYPE_PTRDATAMEM_P(NODE) \ (TREE_CODE (NODE) == OFFSET_TYPE) /* Returns true if NODE is a pointer. */ #define TYPE_PTR_P(NODE) \ (TREE_CODE (NODE) == POINTER_TYPE) /* Returns true if NODE is a reference. */ #define TYPE_REF_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE) /* Returns true if NODE is a pointer or a reference. */ #define INDIRECT_TYPE_P(NODE) \ (TYPE_PTR_P (NODE) || TYPE_REF_P (NODE)) /* Returns true if NODE is an object type: [basic.types] An object type is a (possibly cv-qualified) type that is not a function type, not a reference type, and not a void type. Keep these checks in ascending order, for speed. */ #define TYPE_OBJ_P(NODE) \ (!TYPE_REF_P (NODE) \ && !VOID_TYPE_P (NODE) \ && !FUNC_OR_METHOD_TYPE_P (NODE)) /* Returns true if NODE is a pointer to an object. Keep these checks in ascending tree code order. */ #define TYPE_PTROB_P(NODE) \ (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a reference to an object. Keep these checks in ascending tree code order. */ #define TYPE_REF_OBJ_P(NODE) \ (TYPE_REF_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a pointer to an object, or a pointer to void. Keep these checks in ascending tree code order. 
*/ #define TYPE_PTROBV_P(NODE) \ (TYPE_PTR_P (NODE) \ && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (NODE))) /* Returns true if NODE is a pointer to function type. */ #define TYPE_PTRFN_P(NODE) \ (TYPE_PTR_P (NODE) \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a reference to function type. */ #define TYPE_REFFN_P(NODE) \ (TYPE_REF_P (NODE) \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a pointer to member function type. */ #define TYPE_PTRMEMFUNC_P(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_PTRMEMFUNC_FLAG (NODE)) #define TYPE_PTRMEMFUNC_FLAG(NODE) \ (TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE))) /* Returns true if NODE is a pointer-to-member. */ #define TYPE_PTRMEM_P(NODE) \ (TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE)) /* Returns true if NODE is a pointer or a pointer-to-member. */ #define TYPE_PTR_OR_PTRMEM_P(NODE) \ (TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE)) /* Indicates when overload resolution may resolve to a pointer to member function. [expr.unary.op]/3 */ #define PTRMEM_OK_P(NODE) \ TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF)) /* Get the POINTER_TYPE to the METHOD_TYPE associated with this pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true, before using this macro. */ #define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \ (cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\ cp_type_quals (NODE))) /* As above, but can be used in places that want an lvalue at the expense of not necessarily having the correct cv-qualifiers. */ #define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \ (TREE_TYPE (TYPE_FIELDS (NODE))) /* Returns `A' for a type like `int (A::*)(double)' */ #define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \ TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* The canonical internal RECORD_TYPE from the POINTER_TYPE to METHOD_TYPE. 
*/ #define TYPE_PTRMEMFUNC_TYPE(NODE) \ TYPE_LANG_SLOT_1 (NODE) /* For a pointer-to-member type of the form `T X::*', this is `X'. For a type like `void (X::*)() const', this type is `X', not `const X'. To get at the `const X' you have to look at the TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have type `const X*'. */ #define TYPE_PTRMEM_CLASS_TYPE(NODE) \ (TYPE_PTRDATAMEM_P (NODE) \ ? TYPE_OFFSET_BASETYPE (NODE) \ : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE)) /* For a pointer-to-member type of the form `T X::*', this is `T'. */ #define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \ (TYPE_PTRDATAMEM_P (NODE) \ ? TREE_TYPE (NODE) \ : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for `X'. */ #define PTRMEM_CST_CLASS(NODE) \ TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE))) /* For a pointer-to-member constant `X::Y' this is the _DECL for `Y'. */ #define PTRMEM_CST_MEMBER(NODE) \ (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member) /* The expression in question for a TYPEOF_TYPE. */ #define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE))) /* The type in question for an UNDERLYING_TYPE. */ #define UNDERLYING_TYPE_TYPE(NODE) \ (TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE))) /* The type in question for BASES. */ #define BASES_TYPE(NODE) \ (TYPE_VALUES_RAW (BASES_CHECK (NODE))) #define BASES_DIRECT(NODE) \ TREE_LANG_FLAG_0 (BASES_CHECK (NODE)) /* The expression in question for a DECLTYPE_TYPE. */ #define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE))) /* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an id-expression or a member-access expression. When false, it was parsed as a full expression. 
*/ #define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \ (DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag /* These flags indicate that we want different semantics from normal decltype: lambda capture just drops references, lambda proxies look through implicit dereference. */ #define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \ TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \ TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_REF_CAPTURE(NODE) \ TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_EXTERN(NODE) \ DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_STATIC(NODE) \ DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a lambda capture field for an array of runtime bound. */ #define DECL_VLA_CAPTURE_P(NODE) \ DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE)) /* Nonzero for PARM_DECL node means that this is an array function parameter, i.e., a[] rather than *a. */ #define DECL_ARRAY_PARAMETER_P(NODE) \ DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE)) /* Nonzero for a FIELD_DECL whose NSDMI is currently being instantiated. */ #define DECL_INSTANTIATING_NSDMI_P(NODE) \ DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a base class of the parent object, as opposed to a member field. */ #define DECL_FIELD_IS_BASE(NODE) \ DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a simple (no explicit initializer) lambda capture field, making it invisible to name lookup in unevaluated contexts. 
*/ #define DECL_NORMAL_CAPTURE_P(NODE) \ DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE)) /* Nonzero if TYPE is an anonymous union or struct type. We have to use a flag for this because "A union for which objects or pointers are declared is not an anonymous union" [class.union]. */ #define ANON_AGGR_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr) #define SET_ANON_AGGR_TYPE_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1) /* Nonzero if TYPE is an anonymous union type. */ #define ANON_UNION_TYPE_P(NODE) \ (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE)) /* Define fields and accessors for nodes representing declared names. */ /* True if TYPE is an unnamed structured type with a typedef for linkage purposes. In that case TYPE_NAME and TYPE_STUB_DECL of the MAIN-VARIANT are different. */ #define TYPE_WAS_UNNAMED(NODE) \ (TYPE_NAME (TYPE_MAIN_VARIANT (NODE)) \ != TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE))) /* C++: all of these are overloaded! These apply only to TYPE_DECLs. */ /* The format of each node in the DECL_FRIENDLIST is as follows: The TREE_PURPOSE will be the name of a function, i.e., an IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose TREE_VALUEs are friends with the given name. */ #define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE)) #define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST)) #define FRIEND_DECLS(LIST) (TREE_VALUE (LIST)) /* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of each node is a type; the TREE_VALUE is the access granted for this DECL in that type. The DECL_ACCESS is set by access declarations. For example, if a member that would normally be public in a derived class is made protected, then the derived class and the protected_access_node will appear in the DECL_ACCESS for the node. */ #define DECL_ACCESS(NODE) (LANG_DECL_MIN_CHECK (NODE)->access) /* Nonzero if the FUNCTION_DECL is a global constructor. 
*/ #define DECL_GLOBAL_CTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->global_ctor_p) /* Nonzero if the FUNCTION_DECL is a global destructor. */ #define DECL_GLOBAL_DTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->global_dtor_p) /* Accessor macros for C++ template decl nodes. */ /* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node is an INT_CST whose TREE_INT_CST_LOW indicates the level of the template parameters, with 1 being the outermost set of template parameters. The TREE_VALUE is a vector, whose elements are the template parameters at each level. Each element in the vector is a TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a non-type parameter), or a TYPE_DECL (if the parameter is a type parameter). The TREE_PURPOSE is the default value, if any. The TEMPLATE_PARM_INDEX for the parameter is available as the DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a TYPE_DECL). FIXME: CONST_CAST_TREE is a hack that hopefully will go away after tree is converted to C++ class hierarchy. */ #define DECL_TEMPLATE_PARMS(NODE) \ ((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments #define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \ INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE)) #define DECL_NTPARMS(NODE) \ TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE)) /* For function, method, class-data templates. FIXME: CONST_CAST_TREE is a hack that hopefully will go away after tree is converted to C++ class hierarchy. */ #define DECL_TEMPLATE_RESULT(NODE) \ ((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result /* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS lists all instantiations and specializations of the function so that tsubst_friend_function can reassign them to another template if we find that the namespace-scope template is really a partial instantiation of a friend template. 
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds all instantiations and specializations of the class type, including partial instantiations and partial specializations, so that if we explicitly specialize a partial instantiation we can walk the list in maybe_process_partial_specialization and reassign them or complain as appropriate. In both cases, the TREE_PURPOSE of each node contains the arguments used; the TREE_VALUE contains the generated variable. The template arguments are always complete. For example, given: template <class T> struct S1 { template <class U> struct S2 {}; template <class U> struct S2<U*> {}; }; the record for the partial specialization will contain, as its argument list, { {T}, {U*} }, and will be on the DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template <class U> struct S1<T>::S2'. This list is not used for other templates. */ #define DECL_TEMPLATE_INSTANTIATIONS(NODE) \ DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE)) /* For a class template, this list contains the partial specializations of this template. (Full specializations are not recorded on this list.) The TREE_PURPOSE holds the arguments used in the partial specialization (e.g., for `template <class T> struct S<T*, int>' this will be `T*, int'.) The arguments will also include any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL for the partial specialization. The TREE_TYPE is the _TYPE node for the partial specialization. This list is not used for other templates. */ #define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \ DECL_SIZE (TEMPLATE_DECL_CHECK (NODE)) /* Nonzero for a DECL which is actually a template parameter. Keep these checks in ascending tree code order. */ #define DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) \ && (TREE_CODE (NODE) == CONST_DECL \ || TREE_CODE (NODE) == PARM_DECL \ || TREE_CODE (NODE) == TYPE_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL)) /* Nonzero for a raw template parameter node. 
*/ #define TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_TYPE_PARM \ || TREE_CODE (NODE) == TEMPLATE_TEMPLATE_PARM \ || TREE_CODE (NODE) == TEMPLATE_PARM_INDEX) /* Mark NODE as a template parameter. */ #define SET_DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) = 1) /* Nonzero if NODE is a template template parameter. */ #define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE)) /* Nonzero for a DECL that represents a function template. */ #define DECL_FUNCTION_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL) /* Nonzero for a DECL that represents a class template or alias template. */ #define DECL_TYPE_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL) /* Nonzero for a DECL that represents a class template. */ #define DECL_CLASS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a TEMPLATE_DECL that represents an alias template. */ #define DECL_ALIAS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a NODE which declares a type. */ #define DECL_DECLARES_TYPE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE)) /* Nonzero if NODE declares a function. */ #define DECL_DECLARES_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE)) /* Nonzero if NODE is the typedef implicitly generated for a type when the type is declared. In C++, `struct S {};' is roughly equivalent to `struct S {}; typedef struct S S;' in C. DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this example. 
In C++, there is a second implicit typedef for each class, called the injected-class-name, in the scope of `S' itself, so that you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */ #define DECL_IMPLICIT_TYPEDEF_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE)) #define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \ (DECL_LANG_FLAG_2 (NODE) = 1) #define DECL_SELF_REFERENCE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE)) #define SET_DECL_SELF_REFERENCE_P(NODE) \ (DECL_LANG_FLAG_4 (NODE) = 1) /* A `primary' template is one that has its own template header and is not a partial specialization. A member function of a class template is a template, but not primary. A member template is primary. Friend templates are primary, too. */ /* Returns the primary template corresponding to these parameters. */ #define DECL_PRIMARY_TEMPLATE(NODE) \ (TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE))) /* Returns nonzero if NODE is a primary template. */ #define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE)) /* Nonzero iff NODE is a specialization of a template. The value indicates the type of specializations: 1=implicit instantiation 2=partial or explicit specialization, e.g.: template <> int min<int> (int, int), 3=explicit instantiation, e.g.: template int min<int> (int, int); Note that NODE will be marked as a specialization even if the template it is instantiating is not a primary template. For example, given: template <typename T> struct O { void f(); struct I {}; }; both O<int>::f and O<int>::I will be marked as instantiations. If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also be non-NULL. */ #define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template) /* Like DECL_USE_TEMPLATE, but for class types. */ #define CLASSTYPE_USE_TEMPLATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->use_template) /* True if NODE is a specialization of a primary template. 
*/ #define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \ (CLASS_TYPE_P (NODE) \ && CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) #define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1) #define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) & 1) #define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2) #define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2) /* Returns true for an explicit or partial specialization of a class template. */ #define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 2) #define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 2) #define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1) #define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1) #define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 1) #define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 1) #define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3) #define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3) #define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 3) #define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 3) /* Nonzero if DECL is a friend function which is an instantiation from the point of view of the compiler, but not from the point of view of the language. For example given: template <class T> struct S { friend void f(T) {}; }; the declaration of `void f(int)' generated when S<int> is instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. 
*/ #define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \ (DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \ && !DECL_USE_TEMPLATE (DECL)) /* Nonzero if DECL is a function generated from a function 'temploid', i.e. template, member of class template, or dependent friend. */ #define DECL_TEMPLOID_INSTANTIATION(DECL) \ (DECL_TEMPLATE_INSTANTIATION (DECL) \ || DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL)) /* Nonzero if DECL is either defined implicitly by the compiler or generated from a temploid. */ #define DECL_GENERATED_P(DECL) \ (DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL)) /* Nonzero iff we are currently processing a declaration for an entity with its own template parameter list, and which is not a full specialization. */ #define PROCESSING_REAL_TEMPLATE_DECL_P() \ (!processing_template_parmlist \ && processing_template_decl > template_class_depth (current_scope ())) /* Nonzero if this VAR_DECL or FUNCTION_DECL has already been instantiated, i.e. its definition has been generated from the pattern given in the template. */ #define DECL_TEMPLATE_INSTANTIATED(NODE) \ DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE)) /* We know what we're doing with this decl now. */ #define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE) /* DECL_EXTERNAL must be set on a decl until the decl is actually emitted, so that assemble_external will work properly. So we have this flag to tell us whether the decl is really not external. This flag does not indicate whether or not the decl is defined in the current translation unit; it indicates whether or not we should emit the decl at the end of compilation if it is defined and needed. */ #define DECL_NOT_REALLY_EXTERN(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern) #define DECL_REALLY_EXTERN(NODE) \ (DECL_EXTERNAL (NODE) \ && (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE))) /* A thunk is a stub function. A thunk is an alternate entry point for an ordinary FUNCTION_DECL. 
The address of the ordinary FUNCTION_DECL is given by the DECL_INITIAL, which is always an ADDR_EXPR whose operand is a FUNCTION_DECL. The job of the thunk is to either adjust the this pointer before transferring control to the FUNCTION_DECL, or call FUNCTION_DECL and then adjust the result value. Note, the result pointer adjusting thunk must perform a call to the thunked function, (or be implemented via passing some invisible parameter to the thunked function, which is modified to perform the adjustment just before returning). A thunk may perform either, or both, of the following operations: o Adjust the this or result pointer by a constant offset. o Adjust the this or result pointer by looking up a vcall or vbase offset in the vtable. A this pointer adjusting thunk converts from a base to a derived class, and hence adds the offsets. A result pointer adjusting thunk converts from a derived class to a base, and hence subtracts the offsets. If both operations are performed, then the constant adjustment is performed first for this pointer adjustment and last for the result pointer adjustment. The constant adjustment is given by THUNK_FIXED_OFFSET. If the vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is used. For this pointer adjusting thunks, it is the vcall offset into the vtable. For result pointer adjusting thunks it is the binfo of the virtual base to convert to. Use that binfo's vbase offset. It is possible to have equivalent covariant thunks. These are distinct virtual covariant thunks whose vbase offsets happen to have the same value. THUNK_ALIAS is used to pick one as the canonical thunk, which will get all the this pointer adjusting thunks attached to it. */ /* An integer indicating how many bytes should be subtracted from the this or result pointer when this function is called. */ #define THUNK_FIXED_OFFSET(DECL) \ (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset) /* A tree indicating how to perform the virtual adjustment. 
For a this adjusting thunk it is the number of bytes to be added to the vtable to find the vcall offset. For a result adjusting thunk, it is the binfo of the relevant virtual base. If NULL, then there is no virtual adjust. (The vptr is always located at offset zero from the this or result pointer.) (If the covariant type is within the class hierarchy being laid out, the vbase index is not yet known at the point we need to create the thunks, hence the need to use binfos.) */ #define THUNK_VIRTUAL_OFFSET(DECL) \ (LANG_DECL_MIN_CHECK (FUNCTION_DECL_CHECK (DECL))->access) /* A thunk which is equivalent to another thunk. */ #define THUNK_ALIAS(DECL) \ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info) /* For thunk NODE, this is the FUNCTION_DECL thunked to. It is possible for the target to be a thunk too. */ #define THUNK_TARGET(NODE) \ (LANG_DECL_FN_CHECK (NODE)->befriending_classes) /* True for a SCOPE_REF iff the "template" keyword was used to indicate that the qualified name denotes a template. */ #define QUALIFIED_NAME_IS_TEMPLATE(NODE) \ (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE))) /* [coroutines] */ /* True if NODE is a co-routine FUNCTION_DECL. */ #define DECL_COROUTINE_P(NODE) \ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->coroutine_p) /* True for an OMP_ATOMIC that has dependent parameters. These are stored as an expr in operand 1, and integer_zero_node or clauses in operand 0. */ #define OMP_ATOMIC_DEPENDENT_P(NODE) \ (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST \ || TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == OMP_CLAUSE) /* Used while gimplifying continue statements bound to OMP_FOR nodes. */ #define OMP_FOR_GIMPLIFYING_P(NODE) \ (TREE_LANG_FLAG_0 (OMP_LOOPING_CHECK (NODE))) /* A language-specific token attached to the OpenMP data clauses to hold code (or code fragments) related to ctors, dtors, and op=. See semantics.c for details. 
*/ #define CP_OMP_CLAUSE_INFO(NODE) \ TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \ OMP_CLAUSE__CONDTEMP_)) /* Nonzero if this transaction expression's body contains statements. */ #define TRANSACTION_EXPR_IS_STMT(NODE) \ TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE)) /* These macros provide convenient access to the various _STMT nodes created when parsing template declarations. */ #define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0) #define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1) #define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0) #define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1) #define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0) /* Nonzero if this try block is a function try block. */ #define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE)) #define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0) #define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1) #define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE)) /* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run and the VAR_DECL for which this cleanup exists. */ #define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0) #define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1) #define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2) /* IF_STMT accessors. These give access to the condition of the if statement, the then block of the if statement, and the else block of the if statement if it exists. */ #define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0) #define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1) #define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2) #define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3) #define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE)) /* Like PACK_EXPANSION_EXTRA_ARGS, for constexpr if. 
IF_SCOPE is used while building an IF_STMT; IF_STMT_EXTRA_ARGS is used after it is complete. */ #define IF_STMT_EXTRA_ARGS(NODE) IF_SCOPE (NODE) /* WHILE_STMT accessors. These give access to the condition of the while statement and the body of the while statement, respectively. */ #define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0) #define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1) /* DO_STMT accessors. These give access to the condition of the do statement and the body of the do statement, respectively. */ #define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0) #define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1) /* FOR_STMT accessors. These give access to the init statement, condition, update expression, and body of the for statement, respectively. */ #define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0) #define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1) #define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2) #define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3) #define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4) /* RANGE_FOR_STMT accessors. These give access to the declarator, expression, body, and scope of the statement, respectively. 
*/ #define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0) #define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1) #define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2) #define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3) #define RANGE_FOR_UNROLL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4) #define RANGE_FOR_INIT_STMT(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 5) #define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE)) #define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0) #define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1) #define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2) #define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3) /* True if there are case labels for all possible values of switch cond, either because there is a default: case label or because the case label ranges cover all values. */ #define SWITCH_STMT_ALL_CASES_P(NODE) \ TREE_LANG_FLAG_0 (SWITCH_STMT_CHECK (NODE)) /* True if the body of a switch stmt contains no BREAK_STMTs. */ #define SWITCH_STMT_NO_BREAK_P(NODE) \ TREE_LANG_FLAG_2 (SWITCH_STMT_CHECK (NODE)) /* STMT_EXPR accessor. */ #define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0) /* EXPR_STMT accessor. This gives the expression associated with an expression statement. */ #define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0) /* True if this TARGET_EXPR was created by build_cplus_new, and so we can discard it if it isn't useful. */ #define TARGET_EXPR_IMPLICIT_P(NODE) \ TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE)) /* True if this TARGET_EXPR is the result of list-initialization of a temporary. */ #define TARGET_EXPR_LIST_INIT_P(NODE) \ TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE)) /* True if this TARGET_EXPR expresses direct-initialization of an object to be named later. 
*/ #define TARGET_EXPR_DIRECT_INIT_P(NODE) \ TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE)) /* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if the initializer has void type, it's doing something more complicated. */ #define SIMPLE_TARGET_EXPR_P(NODE) \ (TREE_CODE (NODE) == TARGET_EXPR \ && TARGET_EXPR_INITIAL (NODE) \ && !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE)))) /* True if EXPR expresses direct-initialization of a TYPE. */ #define DIRECT_INIT_EXPR_P(TYPE,EXPR) \ (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \ && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR))) /* True if this CONVERT_EXPR is for a conversion to virtual base in an NSDMI, and should be re-evaluated when used in a constructor. */ #define CONVERT_EXPR_VBASE_PATH(NODE) \ TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE)) /* True if SIZEOF_EXPR argument is type. */ #define SIZEOF_EXPR_TYPE_P(NODE) \ TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE)) /* True if the ALIGNOF_EXPR was spelled "alignof". */ #define ALIGNOF_EXPR_STD_P(NODE) \ TREE_LANG_FLAG_0 (ALIGNOF_EXPR_CHECK (NODE)) /* OMP_DEPOBJ accessors. These give access to the depobj expression of the #pragma omp depobj directive and the clauses, respectively. If OMP_DEPOBJ_CLAUSES is INTEGER_CST, it is instead the update clause kind or OMP_CLAUSE_DEPEND_LAST for destroy clause. */ #define OMP_DEPOBJ_DEPOBJ(NODE) TREE_OPERAND (OMP_DEPOBJ_CHECK (NODE), 0) #define OMP_DEPOBJ_CLAUSES(NODE) TREE_OPERAND (OMP_DEPOBJ_CHECK (NODE), 1) /* An enumeration of the kind of tags that C++ accepts. */ enum tag_types { none_type = 0, /* Not a tag type. */ record_type, /* "struct" types. */ class_type, /* "class" types. */ union_type, /* "union" types. */ enum_type, /* "enum" types. */ typename_type, /* "typename" types. */ scope_type /* namespace or tagged type name followed by :: */ }; /* The various kinds of lvalues we distinguish. */ enum cp_lvalue_kind_flags { clk_none = 0, /* Things that are not an lvalue. 
*/ clk_ordinary = 1, /* An ordinary lvalue. */ clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */ clk_class = 4, /* A prvalue of class or array type. */ clk_bitfield = 8, /* An lvalue for a bit-field. */ clk_packed = 16 /* An lvalue for a packed field. */ }; /* This type is used for parameters and variables which hold combinations of the flags in enum cp_lvalue_kind_flags. */ typedef int cp_lvalue_kind; /* Various kinds of template specialization, instantiation, etc. */ enum tmpl_spec_kind { tsk_none, /* Not a template at all. */ tsk_invalid_member_spec, /* An explicit member template specialization, but the enclosing classes have not all been explicitly specialized. */ tsk_invalid_expl_inst, /* An explicit instantiation containing template parameter lists. */ tsk_excessive_parms, /* A template declaration with too many template parameter lists. */ tsk_insufficient_parms, /* A template declaration with too few parameter lists. */ tsk_template, /* A template declaration. */ tsk_expl_spec, /* An explicit specialization. */ tsk_expl_inst /* An explicit instantiation. */ }; /* The various kinds of access. BINFO_ACCESS depends on these being two bit quantities. The numerical values are important; they are used to initialize RTTI data structures, so changing them changes the ABI. */ enum access_kind { ak_none = 0, /* Inaccessible. */ ak_public = 1, /* Accessible, as a `public' thing. */ ak_protected = 2, /* Accessible, as a `protected' thing. */ ak_private = 3 /* Accessible, as a `private' thing. */ }; /* The various kinds of special functions. If you add to this list, you should update special_function_p as well. */ enum special_function_kind { sfk_none = 0, /* Not a special function. This enumeral must have value zero; see special_function_p. */ /* The following are ordered, for use by member synthesis fns. */ sfk_destructor, /* A destructor. */ sfk_constructor, /* A constructor. 
*/ sfk_inheriting_constructor, /* An inheriting constructor */ sfk_copy_constructor, /* A copy constructor. */ sfk_move_constructor, /* A move constructor. */ sfk_copy_assignment, /* A copy assignment operator. */ sfk_move_assignment, /* A move assignment operator. */ /* The following are unordered. */ sfk_complete_destructor, /* A destructor for complete objects. */ sfk_base_destructor, /* A destructor for base subobjects. */ sfk_deleting_destructor, /* A destructor for complete objects that deletes the object after it has been destroyed. */ sfk_conversion, /* A conversion operator. */ sfk_deduction_guide, /* A class template deduction guide. */ sfk_comparison, /* A comparison operator (e.g. ==, <, <=>). */ sfk_virtual_destructor /* Used by member synthesis fns. */ }; /* The various kinds of linkage. From [basic.link], A name is said to have linkage when it might denote the same object, reference, function, type, template, namespace or value as a name introduced in another scope: -- When a name has external linkage, the entity it denotes can be referred to from scopes of other translation units or from other scopes of the same translation unit. -- When a name has internal linkage, the entity it denotes can be referred to by names from other scopes in the same translation unit. -- When a name has no linkage, the entity it denotes cannot be referred to by names from other scopes. */ enum linkage_kind { lk_none, /* No linkage. */ lk_internal, /* Internal linkage. */ lk_external /* External linkage. */ }; enum duration_kind { dk_static, dk_thread, dk_auto, dk_dynamic }; /* Bitmask flags to control type substitution. 
*/ enum tsubst_flags { tf_none = 0, /* nothing special */ tf_error = 1 << 0, /* give error messages */ tf_warning = 1 << 1, /* give warnings too */ tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */ tf_keep_type_decl = 1 << 3, /* retain typedef type decls (make_typename_type use) */ tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal instantiate_type use) */ tf_user = 1 << 5, /* found template must be a user template (lookup_template_class use) */ tf_conv = 1 << 6, /* We are determining what kind of conversion might be permissible, not actually performing the conversion. */ tf_decltype = 1 << 7, /* We are the operand of decltype. Used to implement the special rules for calls in decltype (5.2.2/11). */ tf_partial = 1 << 8, /* Doing initial explicit argument substitution in fn_type_unification. */ tf_fndecl_type = 1 << 9, /* Substituting the type of a function declaration. */ tf_no_cleanup = 1 << 10, /* Do not build a cleanup (build_target_expr and friends) */ tf_norm = 1 << 11, /* Build diagnostic information during constraint normalization. */ /* Convenient substitution flags combinations. */ tf_warning_or_error = tf_warning | tf_error }; /* This type is used for parameters and variables which hold combinations of the flags in enum tsubst_flags. */ typedef int tsubst_flags_t; /* The kind of checking we can do looking in a class hierarchy. */ enum base_access_flags { ba_any = 0, /* Do not check access, allow an ambiguous base, prefer a non-virtual base */ ba_unique = 1 << 0, /* Must be a unique base. */ ba_check_bit = 1 << 1, /* Check access. */ ba_check = ba_unique | ba_check_bit, ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */ }; /* This type is used for parameters and variables which hold combinations of the flags in enum base_access_flags. */ typedef int base_access; /* The various kinds of access check during parsing. 
*/ enum deferring_kind { dk_no_deferred = 0, /* Check access immediately */ dk_deferred = 1, /* Deferred check */ dk_no_check = 2 /* No access check */ }; /* The kind of base we can find, looking in a class hierarchy. Values <0 indicate we failed. */ enum base_kind { bk_inaccessible = -3, /* The base is inaccessible */ bk_ambig = -2, /* The base is ambiguous */ bk_not_base = -1, /* It is not a base */ bk_same_type = 0, /* It is the same type */ bk_proper_base = 1, /* It is a proper base */ bk_via_virtual = 2 /* It is a proper base, but via a virtual path. This might not be the canonical binfo. */ }; /* Node for "pointer to (virtual) function". This may be distinct from ptr_type_node so gdb can distinguish them. */ #define vfunc_ptr_type_node vtable_entry_type /* For building calls to `delete'. */ extern GTY(()) tree integer_two_node; /* The number of function bodies which we are currently processing. (Zero if we are at namespace scope, one inside the body of a function, two inside the body of a function in a local class, etc.) */ extern int function_depth; /* Nonzero if we are inside eq_specializations, which affects comparison of PARM_DECLs in cp_tree_equal. */ extern int comparing_specializations; /* In parser.c. */ /* Nonzero if we are parsing an unevaluated operand: an operand to sizeof, typeof, or alignof. This is a count since operands to sizeof can be nested. */ extern int cp_unevaluated_operand; /* RAII class used to inhibit the evaluation of operands during parsing and template instantiation. Evaluation warnings are also inhibited. */ class cp_unevaluated { public: cp_unevaluated (); ~cp_unevaluated (); }; /* The reverse: an RAII class used for nested contexts that are evaluated even if the enclosing context is not. 
*/ class cp_evaluated { public: int uneval; int inhibit; cp_evaluated () : uneval(cp_unevaluated_operand), inhibit(c_inhibit_evaluation_warnings) { cp_unevaluated_operand = c_inhibit_evaluation_warnings = 0; } ~cp_evaluated () { cp_unevaluated_operand = uneval; c_inhibit_evaluation_warnings = inhibit; } }; /* in pt.c */ /* These values are used for the `STRICT' parameter to type_unification and fn_type_unification. Their meanings are described with the documentation for fn_type_unification. */ enum unification_kind_t { DEDUCE_CALL, DEDUCE_CONV, DEDUCE_EXACT }; // An RAII class used to create a new pointer map for local // specializations. When the stack goes out of scope, the // previous pointer map is restored. enum lss_policy { lss_blank, lss_copy, lss_nop }; class local_specialization_stack { public: local_specialization_stack (lss_policy = lss_blank); ~local_specialization_stack (); hash_map<tree, tree> *saved; }; /* in class.c */ extern int current_class_depth; /* in decl.c */ /* An array of static vars & fns. */ extern GTY(()) vec<tree, va_gc> *static_decls; /* An array of vtable-needing types that have no key function, or have an emitted key function. */ extern GTY(()) vec<tree, va_gc> *keyed_classes; /* Here's where we control how name mangling takes place. */ /* Cannot use '$' up front, because this confuses gdb (names beginning with '$' are gdb-local identifiers). Note that all forms in which the '$' is significant are long enough for direct indexing (meaning that if we know there is a '$' at a particular location, we can index into the string at any other location that provides distinguishing characters). */ /* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler doesn't allow '.' in symbol names. */ #ifndef NO_DOT_IN_LABEL #define JOINER '.' #define AUTO_TEMP_NAME "_.tmp_" #define VFIELD_BASE ".vf" #define VFIELD_NAME "_vptr." 
#define VFIELD_NAME_FORMAT "_vptr.%s" #else /* NO_DOT_IN_LABEL */ #ifndef NO_DOLLAR_IN_LABEL #define JOINER '$' #define AUTO_TEMP_NAME "_$tmp_" #define VFIELD_BASE "$vf" #define VFIELD_NAME "_vptr$" #define VFIELD_NAME_FORMAT "_vptr$%s" #else /* NO_DOLLAR_IN_LABEL */ #define VTABLE_NAME "__vt_" #define VTABLE_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \ sizeof (VTABLE_NAME) - 1)) #define VFIELD_BASE "__vfb" #define VFIELD_NAME "__vptr_" #define VFIELD_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \ sizeof (VFIELD_NAME) - 1)) #define VFIELD_NAME_FORMAT "__vptr_%s" #endif /* NO_DOLLAR_IN_LABEL */ #endif /* NO_DOT_IN_LABEL */ #define UDLIT_OP_ANSI_PREFIX "operator\"\"" #define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s" #define UDLIT_OP_MANGLED_PREFIX "li" #define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s" #define UDLIT_OPER_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), \ UDLIT_OP_ANSI_PREFIX, \ sizeof (UDLIT_OP_ANSI_PREFIX) - 1)) #define UDLIT_OP_SUFFIX(ID_NODE) \ (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1) #if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) #define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \ && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \ && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER) #define VFIELD_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1)) #endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */ /* Nonzero if we're done parsing and into end-of-file activities. Two if we're done with front-end processing. */ extern int at_eof; /* True if note_mangling_alias should enqueue mangling aliases for later generation, rather than emitting them right away. */ extern bool defer_mangling_aliases; /* True if noexcept is part of the type (i.e. in C++17). 
*/ extern bool flag_noexcept_type; /* A list of namespace-scope objects which have constructors or destructors which reside in the global scope. The decl is stored in the TREE_VALUE slot and the initializer is stored in the TREE_PURPOSE slot. */ extern GTY(()) tree static_aggregates; /* Likewise, for thread local storage. */ extern GTY(()) tree tls_aggregates; enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG }; /* These are used as bits in flags passed to various functions to control their behavior. Despite the LOOKUP_ prefix, many of these do not control name lookup. ??? Functions using these flags should probably be modified to accept explicit boolean flags for the behaviors relevant to them. */ /* Check for access violations. */ #define LOOKUP_PROTECT (1 << 0) #define LOOKUP_NORMAL (LOOKUP_PROTECT) /* Even if the function found by lookup is a virtual function, it should be called directly. */ #define LOOKUP_NONVIRTUAL (1 << 1) /* Non-converting (i.e., "explicit") constructors are not tried. This flag indicates that we are not performing direct-initialization. */ #define LOOKUP_ONLYCONVERTING (1 << 2) #define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING) /* If a temporary is created, it should be created so that it lives as long as the current variable bindings; otherwise it only lives until the end of the complete-expression. It also forces direct-initialization in cases where other parts of the compiler have already generated a temporary, such as reference initialization and the catch parameter. */ #define DIRECT_BIND (1 << 3) /* We're performing a user-defined conversion, so more user-defined conversions are not permitted (only built-in conversions). */ #define LOOKUP_NO_CONVERSION (1 << 4) /* The user has explicitly called a destructor. (Therefore, we do not need to check that the object is non-NULL before calling the destructor.) */ #define LOOKUP_DESTRUCTOR (1 << 5) /* Do not permit references to bind to temporaries. 
*/ #define LOOKUP_NO_TEMP_BIND (1 << 6) /* Do not accept objects, and possibly namespaces. */ #define LOOKUP_PREFER_TYPES (1 << 7) /* Do not accept objects, and possibly types. */ #define LOOKUP_PREFER_NAMESPACES (1 << 8) /* Accept types or namespaces. */ #define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES) /* Return friend declarations and un-declared builtin functions. (Normally, these entities are registered in the symbol table, but not found by lookup.) */ #define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1) /* We're trying to treat an lvalue as an rvalue. */ #define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1) /* We're inside an init-list, so narrowing conversions are ill-formed. */ #define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1) /* We're looking up a constructor for list-initialization. */ #define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1) /* This is the first parameter of a copy constructor. */ #define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1) /* We only want to consider list constructors. */ #define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1) /* Return after determining which function to call and checking access. Used by synthesized_method_walk to determine which functions will be called to initialize subobjects, in order to determine exception specification and possible implicit delete. This is kind of a hack, but exiting early avoids problems with trying to perform argument conversions when the class isn't complete yet. */ #define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1) /* Used by calls from defaulted functions to limit the overload set to avoid cycles trying to declare them (core issue 1092). */ #define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1) /* Used in calls to store_init_value to suppress its usual call to digest_init. */ #define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1) /* An instantiation with explicit template arguments. 
*/ #define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1) /* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */ #define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1) /* Used by case_conversion to disregard non-integral conversions. */ #define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1) /* Used for delegating constructors in order to diagnose self-delegation. */ #define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1) /* Allow initialization of a flexible array members. */ #define LOOKUP_ALLOW_FLEXARRAY_INIT (LOOKUP_DELEGATING_CONS << 1) /* Require constant initialization of a non-constant variable. */ #define LOOKUP_CONSTINIT (LOOKUP_ALLOW_FLEXARRAY_INIT << 1) /* We're looking for either a rewritten comparison operator candidate or the operator to use on the former's result. We distinguish between the two by knowing that comparisons other than == and <=> must be the latter, as must a <=> expression trying to rewrite to <=> without reversing. */ #define LOOKUP_REWRITTEN (LOOKUP_CONSTINIT << 1) /* Reverse the order of the two arguments for comparison rewriting. First we swap the arguments in add_operator_candidates, then we swap the conversions in add_candidate (so that they correspond to the original order of the args), then we swap the conversions back in build_new_op_1 (so they correspond to the order of the args in the candidate). */ #define LOOKUP_REVERSED (LOOKUP_REWRITTEN << 1) /* We're initializing an aggregate from a parenthesized list of values. */ #define LOOKUP_AGGREGATE_PAREN_INIT (LOOKUP_REVERSED << 1) #define LOOKUP_NAMESPACES_ONLY(F) \ (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES)) #define LOOKUP_TYPES_ONLY(F) \ (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES)) #define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH) /* These flags are used by the conversion code. CONV_IMPLICIT : Perform implicit conversions (standard and user-defined). 
CONV_STATIC : Perform the explicit conversions for static_cast. CONV_CONST : Perform the explicit conversions for const_cast. CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast. CONV_PRIVATE : Perform upcasts to private bases. CONV_FORCE_TEMP : Require a new temporary when converting to the same aggregate type. CONV_FOLD : Fold the result of the conversion (only combined into CONV_BACKEND_CONVERT below; NOTE(review): semantics presumed from the name — confirm against cvt.c). */ #define CONV_IMPLICIT 1 #define CONV_STATIC 2 #define CONV_CONST 4 #define CONV_REINTERPRET 8 #define CONV_PRIVATE 16 #define CONV_FORCE_TEMP 32 #define CONV_FOLD 64 #define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ | CONV_REINTERPRET) #define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP) #define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD) /* Used by build_expr_type_conversion to indicate which types are acceptable as arguments to the expression under consideration. */ #define WANT_INT 1 /* integer types, including bool */ #define WANT_FLOAT 2 /* floating point types */ #define WANT_ENUM 4 /* enumerated types */ #define WANT_POINTER 8 /* pointer types */ #define WANT_NULL 16 /* null pointer constant */ #define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */ #define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX) /* Used with comptypes, and related functions, to guide type comparison. */ #define COMPARE_STRICT 0 /* Just check if the types are the same. */ #define COMPARE_BASE 1 /* Check to see if the second type is derived from the first. */ #define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in reverse. */ #define COMPARE_REDECLARATION 4 /* The comparison is being done when another declaration of an existing entity is seen. */ #define COMPARE_STRUCTURAL 8 /* The comparison is intended to be structural. The actual comparison will be identical to COMPARE_STRICT. */ /* Used with start_function. */ #define SF_DEFAULT 0 /* No flags. */ #define SF_PRE_PARSED 1 /* The function declaration has already been parsed. 
*/
#define SF_INCLASS_INLINE    2	/* The function is an inline, defined
				   in the class body.  */

/* Used with start_decl's initialized parameter.  */
#define SD_UNINITIALIZED     0
#define SD_INITIALIZED       1
#define SD_DEFAULTED         2
#define SD_DELETED           3

/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2 is
   derived from TYPE1, or if TYPE2 is a pointer (reference) to a class
   derived from the type pointed to (referred to) by TYPE1.  */
#define same_or_base_type_p(TYPE1, TYPE2) \
  comptypes ((TYPE1), (TYPE2), COMPARE_BASE)

/* These macros are used to access a TEMPLATE_PARM_INDEX.  */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
  ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
/* NOTE(review): unlike the other accessors this one chains through
   TREE_CHAIN rather than a template_parm_index field.  */
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
  (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))

/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
   TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes.  */
#define TEMPLATE_TYPE_PARM_INDEX(NODE)					\
  (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM,		\
				 TEMPLATE_TEMPLATE_PARM,		\
				 BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
  (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
  (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
  (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
  (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
  (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))

/* For a C++17 class deduction placeholder, the template it
   represents.  */
#define CLASS_PLACEHOLDER_TEMPLATE(NODE) \
  (DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE))))

/* Contexts in which auto deduction occurs.  These flags are used to
   control diagnostics in do_auto_deduction.  */
enum auto_deduction_context
{
  adc_unspecified,   /* Not given */
  adc_variable_type, /* Variable initializer deduction */
  adc_return_type,   /* Return type deduction */
  adc_unify,         /* Template argument deduction */
  adc_requirement,   /* Argument deduction constraint */
  adc_decomp_type    /* Decomposition declaration initializer deduction */
};

/* True if this type-parameter belongs to a class template, used by C++17
   class template argument deduction.  */
#define TEMPLATE_TYPE_PARM_FOR_CLASS(NODE) \
  (TREE_LANG_FLAG_0 (TEMPLATE_TYPE_PARM_CHECK (NODE)))

/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto).  */
#define AUTO_IS_DECLTYPE(NODE) \
  (TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))

/* These constants can be used as bit flags in the process of tree
   formatting.

   TFF_PLAIN_IDENTIFIER: unqualified part of a name.
   TFF_SCOPE: include the class and namespace scope of the name.
   TFF_CHASE_TYPEDEF: print the original type-id instead of the
       typedef-name.
   TFF_DECL_SPECIFIERS: print decl-specifiers.
   TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name)
       with a class-key (resp. `enum').
   TFF_RETURN_TYPE: include function return type.
   TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter
       values.
   TFF_EXCEPTION_SPECIFICATION: show function exception specification.
   TFF_TEMPLATE_HEADER: show the template<...> header in a
       template-declaration.
   TFF_TEMPLATE_NAME: show only template-name.
   TFF_EXPR_IN_PARENS: parenthesize expressions.
   TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
   TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
       top-level entity.
   TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
       identical to their defaults.
   TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
       arguments for a function template specialization.
   TFF_POINTER: we are printing a pointer type.  */

#define TFF_PLAIN_IDENTIFIER			(0)
#define TFF_SCOPE				(1)
#define TFF_CHASE_TYPEDEF			(1 << 1)
#define TFF_DECL_SPECIFIERS			(1 << 2)
#define TFF_CLASS_KEY_OR_ENUM			(1 << 3)
#define TFF_RETURN_TYPE				(1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS		(1 << 5)
#define TFF_EXCEPTION_SPECIFICATION		(1 << 6)
#define TFF_TEMPLATE_HEADER			(1 << 7)
#define TFF_TEMPLATE_NAME			(1 << 8)
#define TFF_EXPR_IN_PARENS			(1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS		(1 << 10)
#define TFF_UNQUALIFIED_NAME			(1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS	(1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS		(1 << 13)
#define TFF_POINTER				(1 << 14)

/* These constants can be used as bit flags to control strip_typedefs.

   STF_USER_VISIBLE: use heuristics to try to avoid stripping user-facing
       aliases of internal details.  This is intended for diagnostics,
       where it should (for example) give more useful "aka" types.
   STF_STRIP_DEPENDENT: allow the stripping of aliases with dependent
       template parameters, relying on code elsewhere to report any
       appropriate diagnostics.  */
const unsigned int STF_USER_VISIBLE = 1U;
const unsigned int STF_STRIP_DEPENDENT = 1U << 1;

/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
   node.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE)	\
  ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM)	\
   ? TYPE_TI_TEMPLATE (NODE)				\
   : TYPE_NAME (NODE))

/* in lex.c  */

extern void init_reswords (void);

/* Various flags for the overloaded operator information.  */
enum ovl_op_flags {
  OVL_OP_FLAG_NONE = 0,		/* Don't care.  */
  OVL_OP_FLAG_UNARY = 1,	/* Is unary.  */
  OVL_OP_FLAG_BINARY = 2,	/* Is binary.  */
  OVL_OP_FLAG_AMBIARY = 3,	/* May be unary or binary.  */
  OVL_OP_FLAG_ALLOC = 4,	/* operator new or delete.  */
  OVL_OP_FLAG_DELETE = 1,	/* operator delete.  */
  OVL_OP_FLAG_VEC = 2		/* vector new or delete.  */
};

/* Compressed operator codes.  Order is determined by operators.def
   and does not match that of tree_codes.  */
enum ovl_op_code {
  OVL_OP_ERROR_MARK,
  OVL_OP_NOP_EXPR,
#define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) OVL_OP_##CODE,
#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) /* NOTHING */
#include "operators.def"
  OVL_OP_MAX
};

/* Information about a single overloadable operator.  */
struct GTY(()) ovl_op_info_t {
  /* The IDENTIFIER_NODE for the operator.  */
  tree identifier;
  /* The name of the operator.  */
  const char *name;
  /* The mangled name of the operator.  */
  const char *mangled_name;
  /* The (regular) tree code.  */
  enum tree_code tree_code : 16;
  /* The (compressed) operator code.  */
  enum ovl_op_code ovl_op_code : 8;
  /* The ovl_op_flags of the operator */
  unsigned flags : 8;
};

/* Overloaded operator info indexed by ass_op_p & ovl_op_code.  */
extern GTY(()) ovl_op_info_t ovl_op_info[2][OVL_OP_MAX];
/* Mapping from tree_codes to ovl_op_codes.  */
extern GTY(()) unsigned char ovl_op_mapping[MAX_TREE_CODES];
/* Mapping for ambi-ary operators from the binary to the unary.  */
extern GTY(()) unsigned char ovl_op_alternate[OVL_OP_MAX];

/* Given an ass_op_p boolean and a tree code, return a pointer to its
   overloaded operator info.  Tree codes for non-overloaded operators
   map to the error-operator.  */
#define OVL_OP_INFO(IS_ASS_P, TREE_CODE)			\
  (&ovl_op_info[(IS_ASS_P) != 0][ovl_op_mapping[(TREE_CODE)]])
/* Overloaded operator info for an identifier for which
   IDENTIFIER_OVL_OP_P is true.
*/ #define IDENTIFIER_OVL_OP_INFO(NODE) \ (&ovl_op_info[IDENTIFIER_KIND_BIT_0 (NODE)][IDENTIFIER_CP_INDEX (NODE)]) #define IDENTIFIER_OVL_OP_FLAGS(NODE) \ (IDENTIFIER_OVL_OP_INFO (NODE)->flags) inline tree ovl_op_identifier (bool isass, tree_code code) { return OVL_OP_INFO(isass, code)->identifier; } inline tree ovl_op_identifier (tree_code code) { return ovl_op_identifier (false, code); } #define assign_op_identifier (ovl_op_info[true][OVL_OP_NOP_EXPR].identifier) #define call_op_identifier (ovl_op_info[false][OVL_OP_CALL_EXPR].identifier) /* A type-qualifier, or bitmask therefore, using the TYPE_QUAL constants. */ typedef int cp_cv_quals; /* Non-static member functions have an optional virt-specifier-seq. There is a VIRT_SPEC value for each virt-specifier. They can be combined by bitwise-or to form the complete set of virt-specifiers for a member function. */ enum virt_specifier { VIRT_SPEC_UNSPECIFIED = 0x0, VIRT_SPEC_FINAL = 0x1, VIRT_SPEC_OVERRIDE = 0x2 }; /* A type-qualifier, or bitmask therefore, using the VIRT_SPEC constants. */ typedef int cp_virt_specifiers; /* Wherever there is a function-cv-qual, there could also be a ref-qualifier: [dcl.fct] The return type, the parameter-type-list, the ref-qualifier, and the cv-qualifier-seq, but not the default arguments or the exception specification, are part of the function type. REF_QUAL_NONE Ordinary member function with no ref-qualifier REF_QUAL_LVALUE Member function with the &-ref-qualifier REF_QUAL_RVALUE Member function with the &&-ref-qualifier */ enum cp_ref_qualifier { REF_QUAL_NONE = 0, REF_QUAL_LVALUE = 1, REF_QUAL_RVALUE = 2 }; /* A storage class. */ enum cp_storage_class { /* sc_none must be zero so that zeroing a cp_decl_specifier_seq sets the storage_class field to sc_none. */ sc_none = 0, sc_auto, sc_register, sc_static, sc_extern, sc_mutable }; /* An individual decl-specifier. This is used to index the array of locations for the declspecs in struct cp_decl_specifier_seq below. 
*/
enum cp_decl_spec {
  ds_first,
  ds_signed = ds_first,
  ds_unsigned,
  ds_short,
  ds_long,
  ds_const,
  ds_volatile,
  ds_restrict,
  ds_inline,
  ds_virtual,
  ds_explicit,
  ds_friend,
  ds_typedef,
  ds_alias,
  ds_constexpr,
  ds_complex,
  ds_constinit,
  ds_consteval,
  ds_thread,
  ds_type_spec,
  ds_redefined_builtin_type_spec,
  ds_attribute,
  ds_std_attribute,
  ds_storage_class,
  ds_long_long,
  ds_concept,
  ds_last /* This enumerator must always be the last one.  */
};

/* A decl-specifier-seq.  */
struct cp_decl_specifier_seq {
  /* An array of locations for the declaration sepecifiers, indexed by
     enum cp_decl_spec_word.  */
  location_t locations[ds_last];
  /* The primary type, if any, given by the decl-specifier-seq.
     Modifiers, like "short", "const", and "unsigned" are not
     reflected here.  This field will be a TYPE, unless a typedef-name
     was used, in which case it will be a TYPE_DECL.  */
  tree type;
  /* The attributes, if any, provided with the specifier sequence.  */
  tree attributes;
  /* The c++11 attributes that follows the type specifier.  */
  tree std_attributes;
  /* If non-NULL, a built-in type that the user attempted to redefine
     to some other type.  */
  tree redefined_builtin_type;
  /* The explicit-specifier, if any.  */
  tree explicit_specifier;
  /* The storage class specified -- or sc_none if no storage class was
     explicitly specified.  */
  cp_storage_class storage_class;
  /* For the __intN declspec, this stores the index into the int_n_*
     arrays.  */
  int int_n_idx;
  /* True iff TYPE_SPEC defines a class or enum.  */
  BOOL_BITFIELD type_definition_p : 1;
  /* True iff multiple types were (erroneously) specified for this
     decl-specifier-seq.  */
  BOOL_BITFIELD multiple_types_p : 1;
  /* True iff multiple storage classes were (erroneously) specified
     for this decl-specifier-seq or a combination of a storage class
     with a typedef specifier.  */
  BOOL_BITFIELD conflicting_specifiers_p : 1;
  /* True iff at least one decl-specifier was found.  */
  BOOL_BITFIELD any_specifiers_p : 1;
  /* True iff at least one type-specifier was found.  */
  BOOL_BITFIELD any_type_specifiers_p : 1;
  /* True iff "int" was explicitly provided.  */
  BOOL_BITFIELD explicit_int_p : 1;
  /* True iff "__intN" was explicitly provided.  */
  BOOL_BITFIELD explicit_intN_p : 1;
  /* True iff "char" was explicitly provided.  */
  BOOL_BITFIELD explicit_char_p : 1;
  /* True iff ds_thread is set for __thread, not thread_local.  */
  BOOL_BITFIELD gnu_thread_keyword_p : 1;
  /* True iff the type is a decltype.  */
  BOOL_BITFIELD decltype_p : 1;
  /* True iff the alternate "__intN__" form of the __intN type has
     been used.  */
  BOOL_BITFIELD int_n_alt: 1;
};

/* The various kinds of declarators.  */
enum cp_declarator_kind {
  cdk_id,
  cdk_function,
  cdk_array,
  cdk_pointer,
  cdk_reference,
  cdk_ptrmem,
  cdk_decomp,
  cdk_error
};

/* A declarator.  */
typedef struct cp_declarator cp_declarator;

typedef struct cp_parameter_declarator cp_parameter_declarator;

/* A parameter, before it has been semantically analyzed.  */
struct cp_parameter_declarator {
  /* The next parameter, or NULL_TREE if none.  */
  cp_parameter_declarator *next;
  /* The decl-specifiers-seq for the parameter.  */
  cp_decl_specifier_seq decl_specifiers;
  /* The declarator for the parameter.  */
  cp_declarator *declarator;
  /* The default-argument expression, or NULL_TREE, if none.  */
  tree default_argument;
  /* True iff this is a template parameter pack.  */
  bool template_parameter_pack_p;
  /* Location within source.  */
  location_t loc;
};

/* A declarator.  */
struct cp_declarator {
  /* The kind of declarator.  */
  ENUM_BITFIELD (cp_declarator_kind) kind : 4;
  /* Whether we parsed an ellipsis (`...') just before the declarator,
     to indicate this is a parameter pack.  */
  BOOL_BITFIELD parameter_pack_p : 1;
  /* If this declarator is parenthesized, this the open-paren.  It is
     UNKNOWN_LOCATION when not parenthesized.  */
  location_t parenthesized;
  location_t id_loc; /* Currently only set for cdk_id, cdk_decomp and
			cdk_function.  */
  /* GNU Attributes that apply to this declarator.  If the declarator
     is a pointer or a reference, these attributes apply to the type
     pointed to.  */
  tree attributes;
  /* Standard C++11 attributes that apply to this declarator.  If the
     declarator is a pointer or a reference, these attributes apply
     to the pointer, rather than to the type pointed to.  */
  tree std_attributes;
  /* For all but cdk_id, cdk_decomp and cdk_error, the contained
     declarator.  For cdk_id, cdk_decomp and cdk_error, guaranteed to
     be NULL.  */
  cp_declarator *declarator;
  union {
    /* For identifiers.  */
    struct {
      /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
	 *_TYPE) for this identifier.  */
      tree qualifying_scope;
      /* The unqualified name of the entity -- an IDENTIFIER_NODE,
	 BIT_NOT_EXPR, or TEMPLATE_ID_EXPR.  */
      tree unqualified_name;
      /* If this is the name of a function, what kind of special
	 function (if any).  */
      special_function_kind sfk;
    } id;
    /* For functions.  */
    struct {
      /* The parameters to the function as a TREE_LIST of
	 decl/default.  */
      tree parameters;
      /* The cv-qualifiers for the function.  */
      cp_cv_quals qualifiers;
      /* The virt-specifiers for the function.  */
      cp_virt_specifiers virt_specifiers;
      /* The ref-qualifier for the function.  */
      cp_ref_qualifier ref_qualifier;
      /* The transaction-safety qualifier for the function.  */
      tree tx_qualifier;
      /* The exception-specification for the function.  */
      tree exception_specification;
      /* The late-specified return type, if any.  */
      tree late_return_type;
      /* The trailing requires-clause, if any.  */
      tree requires_clause;
    } function;
    /* For arrays.  */
    struct {
      /* The bounds to the array.  */
      tree bounds;
    } array;
    /* For cdk_pointer and cdk_ptrmem.  */
    struct {
      /* The cv-qualifiers for the pointer.  */
      cp_cv_quals qualifiers;
      /* For cdk_ptrmem, the class type containing the member.  */
      tree class_type;
    } pointer;
    /* For cdk_reference */
    struct {
      /* The cv-qualifiers for the reference.  These qualifiers are
	 only used to diagnose ill-formed code.
*/
      cp_cv_quals qualifiers;
      /* Whether this is an rvalue reference */
      bool rvalue_ref;
    } reference;
  } u;
};

/* A level of template instantiation.  */
struct GTY((chain_next ("%h.next"))) tinst_level {
  /* The immediately deeper level in the chain.  */
  struct tinst_level *next;

  /* The original node.  TLDCL can be a DECL (for a function or static
     data member), a TYPE (for a class), depending on what we were
     asked to instantiate, or a TREE_LIST with the template as PURPOSE
     and the template args as VALUE, if we are substituting for
     overload resolution.  In all these cases, TARGS is NULL.
     However, to avoid creating TREE_LIST objects for substitutions if
     we can help, we store PURPOSE and VALUE in TLDCL and TARGS,
     respectively.  So TLDCL stands for TREE_LIST or DECL (the
     template is a DECL too), whereas TARGS stands for the template
     arguments.  */
  tree tldcl, targs;

 private:
  /* Return TRUE iff the original node is a split list.  */
  bool split_list_p () const { return targs; }

  /* Return TRUE iff the original node is a TREE_LIST object.  */
  bool tree_list_p () const
  {
    return !split_list_p () && TREE_CODE (tldcl) == TREE_LIST;
  }

  /* Return TRUE iff the original node is not a list, split or not.  */
  bool not_list_p () const
  {
    return !split_list_p () && !tree_list_p ();
  }

  /* Convert (in place) the original node from a split list to a
     TREE_LIST.  */
  tree to_list ();

 public:
  /* Release storage for OBJ and node, if it's a TREE_LIST.  */
  static void free (tinst_level *obj);

  /* Return TRUE iff the original node is a list, split or not.  */
  bool list_p () const { return !not_list_p (); }

  /* Return the original node; if it's a split list, make it a
     TREE_LIST first, so that it can be returned as a single tree
     object.  */
  tree get_node () {
    if (!split_list_p ()) return tldcl;
    else return to_list ();
  }

  /* Return the original node if it's a DECL or a TREE_LIST, but do
     NOT convert a split list to a TREE_LIST: return NULL instead.  */
  tree maybe_get_node () const {
    if (!split_list_p ()) return tldcl;
    else return NULL_TREE;
  }

  /* The location where the template is instantiated.  */
  location_t locus;

  /* errorcount + sorrycount when we pushed this level.  */
  unsigned short errors;

  /* Count references to this object.  If refcount reaches
     refcount_infinity value, we don't increment or decrement the
     refcount anymore, as the refcount isn't accurate anymore.
     The object can be still garbage collected if unreferenced from
     anywhere, which might keep referenced objects referenced longer than
     otherwise necessary.  Hitting the infinity is rare though.  */
  unsigned short refcount;

  /* Infinity value for the above refcount.  */
  static const unsigned short refcount_infinity = (unsigned short) ~0;
};

/* BUILT_IN_FRONTEND function codes.  */
enum cp_built_in_function {
  CP_BUILT_IN_IS_CONSTANT_EVALUATED,
  CP_BUILT_IN_INTEGER_PACK,
  CP_BUILT_IN_SOURCE_LOCATION,
  CP_BUILT_IN_LAST
};

bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);

/* Return the type of the `this' parameter of FNTYPE.  FNTYPE must be
   a METHOD_TYPE (asserted below); the `this' type is its first
   argument.  */

inline tree
type_of_this_parm (const_tree fntype)
{
  function_args_iterator iter;
  gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
  function_args_iter_init (&iter, fntype);
  return function_args_iter_cond (&iter);
}

/* Return the class of the `this' parameter of FNTYPE.  */

inline tree
class_of_this_parm (const_tree fntype)
{
  return TREE_TYPE (type_of_this_parm (fntype));
}

/* A parameter list indicating for a function with no parameters,
   e.g  "int f(void)".  */
extern cp_parameter_declarator *no_parameters;

/* Various dump ids.
*/
extern int class_dump_id;
extern int raw_dump_id;

/* in call.c */
extern bool check_dtor_name (tree, tree);
int magic_varargs_p (tree);

extern tree build_conditional_expr (const op_location_t &, tree, tree, tree, tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree extract_call_expr (tree);
extern tree build_trivial_dtor_call (tree);
extern tree build_user_type_conversion (tree, tree, int, tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **, tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *, tree *, tree, tree, tree *, tsubst_flags_t);
extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **, tree, int, tree *, tsubst_flags_t);
extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **, tree, int, tsubst_flags_t);
extern tree build_new_op (const op_location_t &, enum tree_code, int, tree, tree, tree, tree *, tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **, tsubst_flags_t);
extern bool aligned_allocation_fn_p (tree);
extern tree destroying_delete_p (tree);
extern bool usual_deallocation_fn_p (tree);
extern tree build_op_delete_call (enum tree_code, tree, tree, bool, tree, tree, tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_standard (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int, tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int, tsubst_flags_t);
extern int conv_flags (int, int, tree, tree, int);
extern struct conversion * good_conversion (tree, tree, tree, int, tsubst_flags_t);
extern location_t get_fndecl_argument_location (tree, int);
extern void complain_about_bad_argument (location_t arg_loc, tree from_type, tree to_type, tree fndecl, int parmnum);
extern void maybe_inform_about_fndecl_for_bogus_argument_init (tree, int);

/* A class for recording information about access failures (e.g. private
   fields), so that we can potentially supply a fix-it hint about
   an accessor (from a context in which the constness of the object
   is known).  */

class access_failure_info
{
 public:
  access_failure_info () : m_was_inaccessible (false),
    m_basetype_path (NULL_TREE),
    m_decl (NULL_TREE), m_diag_decl (NULL_TREE)
  {}

  void record_access_failure (tree basetype_path, tree decl, tree diag_decl);

  bool was_inaccessible_p () const { return m_was_inaccessible; }
  tree get_decl () const { return m_decl; }
  tree get_diag_decl () const { return m_diag_decl; }
  tree get_any_accessor (bool const_p) const;
  void maybe_suggest_accessor (bool const_p) const;
  static void add_fixit_hint (rich_location *richloc, tree accessor);

 private:
  /* Whether record_access_failure was called.  */
  bool m_was_inaccessible;
  tree m_basetype_path;
  tree m_decl;
  tree m_diag_decl;
};

extern void complain_about_access (tree, tree, bool);
extern bool enforce_access (tree, tree, tree, tsubst_flags_t, access_failure_info *afi = NULL);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int, tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (location_t, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int, tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**, tree * = NULL);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern bool reference_compatible_p (tree, tree);
extern int remaining_arguments (tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_converted_constant_expr (tree, tree, tsubst_flags_t);
extern tree build_converted_constant_bool_expr (tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool, tsubst_flags_t);
extern vec<tree,va_gc> *resolve_args (vec<tree,va_gc>*, tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *, tsubst_flags_t, tree = NULL_TREE);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
extern void validate_conversion_obstack (void);
extern void mark_versions_used (tree);
extern bool unsafe_return_slot_p (tree);
extern bool cp_warn_deprecated_use (tree, tsubst_flags_t = tf_warning_or_error);
extern void cp_warn_deprecated_use_scopes (tree);
extern tree get_function_version_dispatcher (tree);

/* in class.c */
extern tree build_vfield_ref (tree, tree);
extern tree build_if_in_charge (tree true_stmt, tree false_stmt = void_node);
extern tree build_base_path (enum tree_code, tree, tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool, tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern bool add_method (tree, tree, bool);
extern tree declared_access (tree);
extern tree currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree outermost_open_class (void);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int * = NULL);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree, bool);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern tree lookup_vfn_in_binfo (tree, tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern bool vptr_via_virtual_p (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool default_ctor_p (const_tree);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_non_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_constexpr_destructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared);
extern bool classtype_has_non_deleted_move_ctor (tree);
extern bool classtype_has_non_deleted_copy_ctor (tree);
extern tree classtype_has_depr_implicit_copy (tree);
extern bool classtype_has_op (tree, tree_code);
extern tree classtype_has_defaulted_op (tree, tree_code);
extern bool type_build_ctor_call (tree);
extern bool type_build_dtor_call (tree);
extern void explain_non_literal_class (tree);
extern void inherit_targ_abi_tags (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void check_abi_tags (tree);
extern tree missing_abi_tags (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree copy_fndecl_with_name (tree, tree);
extern void clone_function_decl (tree, bool);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
extern tree common_enclosing_class (tree, tree);

/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree, tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int, tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree cp_get_callee (tree);
extern tree cp_get_callee_fndecl (tree);
extern tree cp_get_callee_fndecl_nofold (tree);
extern tree cp_get_fndecl_from_callee (tree, bool fold = true);
extern tree convert_to_void (tree, impl_conv_void, tsubst_flags_t);
extern tree convert_force (tree, tree, int, tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern bool can_convert_qual (tree, tree);
extern tree perform_qualification_conversions (tree, tree);
extern bool tx_safe_fn_type_p (tree);
extern tree tx_unsafe_fn_variant (tree);
extern bool fnptr_conv_p (tree, tree);
extern tree strip_fnptr_conv (tree);

/* in name-lookup.c */
extern void maybe_push_cleanup_level (tree);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);

/* decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure (union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern void note_break_stmt (void);
extern bool note_iteration_stmt_body_start (void);
extern void note_iteration_stmt_body_end (bool);
extern void determine_local_discriminator (tree);
extern int decls_match (tree, tree, bool = true);
extern bool maybe_version_functions (tree, tree, bool);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree build_typename_type (tree, tree, tree, tag_types);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree, int);
extern tree build_cp_library_fn_ptr (const char *, tree, int);
extern tree push_library_fn (tree, tree, tree, int);
extern tree push_void_library_fn (tree, tree, int);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (location_t location, tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void omp_declare_variant_finalize (tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern tree lookup_decomp_type (tree);
extern void cp_maybe_mangle_decomp (tree, tree, unsigned int); extern void cp_finish_decomp (tree, tree, unsigned int); extern int cp_complete_array_type (tree *, tree, bool); extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t); extern tree build_ptrmemfunc_type (tree); extern tree build_ptrmem_type (tree, tree); /* the grokdeclarator prototype is in decl.h */ extern tree build_this_parm (tree, tree, cp_cv_quals); extern tree grokparms (tree, tree *); extern int copy_fn_p (const_tree); extern bool move_fn_p (const_tree); extern bool move_signature_fn_p (const_tree); extern tree get_scope_of_declarator (const cp_declarator *); extern void grok_special_member_properties (tree); extern bool grok_ctor_properties (const_tree, const_tree); extern bool grok_op_properties (tree, bool); extern tree xref_tag (enum tag_types, tree, tag_scope, bool); extern tree xref_tag_from_type (tree, tree, tag_scope); extern void xref_basetypes (tree, tree); extern tree start_enum (tree, tree, tree, tree, bool, bool *); extern void finish_enum_value_list (tree); extern void finish_enum (tree); extern void build_enumerator (tree, tree, tree, tree, location_t); extern tree lookup_enumerator (tree, tree); extern bool start_preparsed_function (tree, tree, int); extern bool start_function (cp_decl_specifier_seq *, const cp_declarator *, tree); extern tree begin_function_body (void); extern void finish_function_body (tree); extern tree outer_curly_brace_block (tree); extern tree finish_function (bool); extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree); extern void maybe_register_incomplete_var (tree); extern void maybe_commonize_var (tree); extern void complete_vars (tree); extern tree static_fn_type (tree); extern void revert_static_member_fn (tree); extern void fixup_anonymous_aggr (tree); extern tree compute_array_index_type (tree, tree, tsubst_flags_t); extern tree check_default_argument (tree, tree, tsubst_flags_t); extern int 
wrapup_namespace_globals (); extern tree create_implicit_typedef (tree, tree); extern int local_variable_p (const_tree); extern tree register_dtor_fn (tree); extern tmpl_spec_kind current_tmpl_spec_kind (int); extern tree cp_fname_init (const char *, tree *); extern tree cxx_builtin_function (tree decl); extern tree cxx_builtin_function_ext_scope (tree decl); extern tree cxx_simulate_builtin_function_decl (tree); extern tree check_elaborated_type_specifier (enum tag_types, tree, bool); extern void warn_extern_redeclared_static (tree, tree); extern tree cxx_comdat_group (tree); extern bool cp_missing_noreturn_ok_p (tree); extern bool is_direct_enum_init (tree, tree); extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *); extern tree check_var_type (tree, tree, location_t); extern tree reshape_init (tree, tree, tsubst_flags_t); extern tree next_initializable_field (tree); extern tree fndecl_declared_return_type (tree); extern bool undeduced_auto_decl (tree); extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error); extern tree finish_case_label (location_t, tree, tree); extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t); extern bool check_array_designated_initializer (constructor_elt *, unsigned HOST_WIDE_INT); extern bool check_for_uninitialized_const_var (tree, bool, tsubst_flags_t); extern tree build_explicit_specifier (tree, tsubst_flags_t); extern void do_push_parm_decls (tree, tree, tree *); /* in decl2.c */ extern void record_mangling (tree, bool); extern void overwrite_mangling (tree, tree); extern void note_mangling_alias (tree, tree); extern void generate_mangling_aliases (void); extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier); extern tree build_pointer_ptrmemfn_type (tree); extern tree change_return_type (tree, tree); extern void maybe_retrofit_in_chrg (tree); extern void maybe_make_one_only (tree); extern bool vague_linkage_p (tree); extern void grokclassfn (tree, tree, enum 
overload_flags); extern tree grok_array_decl (location_t, tree, tree, bool); extern tree delete_sanity (location_t, tree, tree, bool, int, tsubst_flags_t); extern tree check_classfn (tree, tree, tree); extern void check_member_template (tree); extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *, tree, bool, tree, tree); extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *, tree, tree, tree); extern tree splice_template_attributes (tree *, tree); extern bool any_dependent_type_attributes_p (tree); extern tree cp_reconstruct_complex_type (tree, tree); extern bool attributes_naming_typedef_ok (tree); extern void cplus_decl_attributes (tree *, tree, int); extern void finish_anon_union (tree); extern void cxx_post_compilation_parsing_cleanups (void); extern tree coerce_new_type (tree, location_t); extern void coerce_delete_type (tree, location_t); extern void comdat_linkage (tree); extern void determine_visibility (tree); extern void constrain_class_visibility (tree); extern void reset_type_linkage (tree); extern void tentative_decl_linkage (tree); extern void import_export_decl (tree); extern tree build_cleanup (tree); extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **, tsubst_flags_t); extern bool decl_defined_p (tree); extern bool decl_constant_var_p (tree); extern bool decl_maybe_constant_var_p (tree); extern void no_linkage_error (tree); extern void check_default_args (tree); extern bool mark_used (tree); extern bool mark_used (tree, tsubst_flags_t); extern void finish_static_data_member_decl (tree, tree, bool, tree, int); extern tree cp_build_parm_decl (tree, tree, tree); extern void copy_linkage (tree, tree); extern tree get_guard (tree); extern tree get_guard_cond (tree, bool); extern tree set_guard (tree); extern tree maybe_get_tls_wrapper_call (tree); extern void mark_needed (tree); extern bool decl_needed_p (tree); extern void note_vague_linkage_fn (tree); extern void 
note_variable_template_instantiation (tree); extern tree build_artificial_parm (tree, tree, tree); extern bool possibly_inlined_p (tree); extern int parm_index (tree); extern tree vtv_start_verification_constructor_init_function (void); extern tree vtv_finish_verification_constructor_init_function (tree); extern bool cp_omp_mappable_type (tree); extern bool cp_omp_emit_unmappable_type_notes (tree); extern void cp_check_const_attributes (tree); /* in error.c */ extern const char *type_as_string (tree, int); extern const char *type_as_string_translate (tree, int); extern const char *decl_as_string (tree, int); extern const char *decl_as_string_translate (tree, int); extern const char *decl_as_dwarf_string (tree, int); extern const char *expr_as_string (tree, int); extern const char *expr_to_string (tree); extern const char *lang_decl_name (tree, int, bool); extern const char *lang_decl_dwarf_name (tree, int, bool); extern const char *language_to_string (enum languages); extern const char *class_key_or_enum_as_string (tree); extern void maybe_warn_variadic_templates (void); extern void maybe_warn_cpp0x (cpp0x_warn_str str); extern bool pedwarn_cxx98 (location_t, int, const char *, ...) 
ATTRIBUTE_GCC_DIAG(3,4); extern location_t location_of (tree); extern void qualified_name_lookup_error (tree, tree, tree, location_t); /* in except.c */ extern void init_exception_processing (void); extern tree expand_start_catch_block (tree); extern void expand_end_catch_block (void); extern tree build_exc_ptr (void); extern tree build_throw (location_t, tree); extern int nothrow_libfn_p (const_tree); extern void check_handlers (tree); extern tree finish_noexcept_expr (tree, tsubst_flags_t); extern bool expr_noexcept_p (tree, tsubst_flags_t); extern void perform_deferred_noexcept_checks (void); extern bool nothrow_spec_p (const_tree); extern bool type_noexcept_p (const_tree); extern bool type_throw_all_p (const_tree); extern tree build_noexcept_spec (tree, tsubst_flags_t); extern void choose_personality_routine (enum languages); extern tree build_must_not_throw_expr (tree,tree); extern tree eh_type_info (tree); extern tree begin_eh_spec_block (void); extern void finish_eh_spec_block (tree, tree); extern tree build_eh_type_type (tree); extern tree cp_protect_cleanup_actions (void); extern void maybe_splice_retval_cleanup (tree); extern tree maybe_set_retval_sentinel (void); extern tree template_parms_to_args (tree); extern tree template_parms_level_to_args (tree); extern tree generic_targs_for (tree); /* in expr.c */ extern tree cplus_expand_constant (tree); extern tree mark_use (tree expr, bool rvalue_p, bool read_p, location_t = UNKNOWN_LOCATION, bool reject_builtin = true); extern tree mark_rvalue_use (tree, location_t = UNKNOWN_LOCATION, bool reject_builtin = true); extern tree mark_lvalue_use (tree); extern tree mark_lvalue_use_nonread (tree); extern tree mark_type_use (tree); extern tree mark_discarded_use (tree); extern void mark_exp_read (tree); /* friend.c */ extern int is_friend (tree, tree); extern void make_friend_class (tree, tree, bool); extern void add_friend (tree, tree, bool); extern tree do_friend (tree, tree, tree, tree, enum overload_flags, 
bool); extern void set_global_friend (tree); extern bool is_global_friend (tree); /* in init.c */ extern tree expand_member_init (tree); extern void emit_mem_initializers (tree); extern tree build_aggr_init (tree, tree, int, tsubst_flags_t); extern int is_class_type (tree, int); extern tree get_type_value (tree); extern tree build_zero_init (tree, tree, bool); extern tree build_value_init (tree, tsubst_flags_t); extern tree build_value_init_noctor (tree, tsubst_flags_t); extern tree get_nsdmi (tree, bool, tsubst_flags_t); extern tree build_offset_ref (tree, tree, bool, tsubst_flags_t); extern tree throw_bad_array_new_length (void); extern bool type_has_new_extended_alignment (tree); extern unsigned malloc_alignment (void); extern tree build_new_constexpr_heap_type (tree, tree, tree); extern tree build_new (location_t, vec<tree, va_gc> **, tree, tree, vec<tree, va_gc> **, int, tsubst_flags_t); extern tree get_temp_regvar (tree, tree); extern tree build_vec_init (tree, tree, tree, bool, int, tsubst_flags_t); extern tree build_delete (location_t, tree, tree, special_function_kind, int, int, tsubst_flags_t); extern void push_base_cleanups (void); extern tree build_vec_delete (location_t, tree, tree, special_function_kind, int, tsubst_flags_t); extern tree create_temporary_var (tree); extern void initialize_vtbl_ptrs (tree); extern tree scalar_constant_value (tree); extern tree decl_constant_value (tree, bool); extern tree decl_really_constant_value (tree, bool = true); extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool); extern tree build_vtbl_address (tree); extern bool maybe_reject_flexarray_init (tree, tree); /* in lex.c */ extern void cxx_dup_lang_specific_decl (tree); extern void yyungetc (int, int); extern tree unqualified_name_lookup_error (tree, location_t = UNKNOWN_LOCATION); extern tree unqualified_fn_lookup_error (cp_expr); extern tree make_conv_op_name (tree); extern tree build_lang_decl (enum tree_code, tree, tree); extern tree 
build_lang_decl_loc (location_t, enum tree_code, tree, tree); extern void retrofit_lang_decl (tree); extern void fit_decomposition_lang_decl (tree, tree); extern tree copy_decl (tree CXX_MEM_STAT_INFO); extern tree copy_type (tree CXX_MEM_STAT_INFO); extern tree cxx_make_type (enum tree_code CXX_MEM_STAT_INFO); extern tree make_class_type (enum tree_code CXX_MEM_STAT_INFO); extern const char *get_identifier_kind_name (tree); extern void set_identifier_kind (tree, cp_identifier_kind); extern bool cxx_init (void); extern void cxx_finish (void); extern bool in_main_input_context (void); /* in method.c */ extern void init_method (void); extern tree make_thunk (tree, bool, tree, tree); extern void finish_thunk (tree); extern void use_thunk (tree, bool); extern bool trivial_fn_p (tree); extern tree forward_parm (tree); extern bool is_trivially_xible (enum tree_code, tree, tree); extern bool is_xible (enum tree_code, tree, tree); extern tree get_defaulted_eh_spec (tree, tsubst_flags_t = tf_warning_or_error); extern void after_nsdmi_defaulted_late_checks (tree); extern bool maybe_explain_implicit_delete (tree); extern void explain_implicit_non_constexpr (tree); extern void deduce_inheriting_ctor (tree); extern bool decl_remember_implicit_trigger_p (tree); extern void synthesize_method (tree); extern tree lazily_declare_fn (special_function_kind, tree); extern tree skip_artificial_parms_for (const_tree, tree); extern int num_artificial_parms_for (const_tree); extern tree make_alias_for (tree, tree); extern tree get_copy_ctor (tree, tsubst_flags_t); extern tree get_copy_assign (tree); extern tree get_default_ctor (tree); extern tree get_dtor (tree, tsubst_flags_t); extern tree strip_inheriting_ctors (tree); extern tree inherited_ctor_binfo (tree); extern bool ctor_omit_inherited_parms (tree); extern tree locate_ctor (tree); extern tree implicitly_declare_fn (special_function_kind, tree, bool, tree, tree); /* In optimize.c */ extern bool maybe_clone_body (tree); /* In 
parser.c */ extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool, unsigned short); extern void cp_convert_omp_range_for (tree &, vec<tree, va_gc> *, tree &, tree &, tree &, tree &, tree &, tree &); extern void cp_finish_omp_range_for (tree, tree); extern bool parsing_nsdmi (void); extern bool parsing_default_capturing_generic_lambda_in_template (void); extern void inject_this_parameter (tree, cp_cv_quals); extern location_t defparse_location (tree); extern void maybe_show_extern_c_location (void); extern bool literal_integer_zerop (const_tree); /* in pt.c */ extern void push_access_scope (tree); extern void pop_access_scope (tree); extern bool check_template_shadow (tree); extern bool check_auto_in_tmpl_args (tree, tree); extern tree get_innermost_template_args (tree, int); extern void maybe_begin_member_template_processing (tree); extern void maybe_end_member_template_processing (void); extern tree finish_member_template_decl (tree); extern void begin_template_parm_list (void); extern bool begin_specialization (void); extern void reset_specialization (void); extern void end_specialization (void); extern void begin_explicit_instantiation (void); extern void end_explicit_instantiation (void); extern void check_unqualified_spec_or_inst (tree, location_t); extern tree check_explicit_specialization (tree, tree, int, int, tree = NULL_TREE); extern int num_template_headers_for_class (tree); extern void check_template_variable (tree); extern tree make_auto (void); extern tree make_decltype_auto (void); extern tree make_constrained_auto (tree, tree); extern tree make_constrained_decltype_auto (tree, tree); extern tree make_template_placeholder (tree); extern bool template_placeholder_p (tree); extern bool ctad_template_p (tree); extern tree do_auto_deduction (tree, tree, tree, tsubst_flags_t = tf_warning_or_error, auto_deduction_context = adc_unspecified, tree = NULL_TREE, int = LOOKUP_NORMAL); extern tree type_uses_auto (tree); extern tree 
type_uses_auto_or_concept (tree); extern void append_type_to_template_for_access_check (tree, tree, tree, location_t); extern tree convert_generic_types_to_packs (tree, int, int); extern tree splice_late_return_type (tree, tree); extern bool is_auto (const_tree); extern tree process_template_parm (tree, location_t, tree, bool, bool); extern tree end_template_parm_list (tree); extern void end_template_parm_list (void); extern void end_template_decl (void); extern tree maybe_update_decl_type (tree, tree); extern bool check_default_tmpl_args (tree, tree, bool, bool, int); extern tree push_template_decl (tree); extern tree push_template_decl_real (tree, bool); extern tree add_inherited_template_parms (tree, tree); extern void template_parm_level_and_index (tree, int*, int*); extern bool redeclare_class_template (tree, tree, tree); extern tree lookup_template_class (tree, tree, tree, tree, int, tsubst_flags_t); extern tree lookup_template_function (tree, tree); extern tree lookup_template_variable (tree, tree); extern int uses_template_parms (tree); extern bool uses_template_parms_level (tree, int); extern bool in_template_function (void); extern bool need_generic_capture (void); extern tree instantiate_class_template (tree); extern tree instantiate_template (tree, tree, tsubst_flags_t); extern tree fn_type_unification (tree, tree, tree, const tree *, unsigned int, tree, unification_kind_t, int, struct conversion **, bool, bool); extern void mark_decl_instantiated (tree, int); extern int more_specialized_fn (tree, tree, int); extern void do_decl_instantiation (tree, tree); extern void do_type_instantiation (tree, tree, tsubst_flags_t); extern bool always_instantiate_p (tree); extern bool maybe_instantiate_noexcept (tree, tsubst_flags_t = tf_warning_or_error); extern tree instantiate_decl (tree, bool, bool); extern int comp_template_parms (const_tree, const_tree); extern bool template_heads_equivalent_p (const_tree, const_tree); extern bool builtin_pack_fn_p (tree); 
extern tree uses_parameter_packs (tree); extern bool template_parameter_pack_p (const_tree); extern bool function_parameter_pack_p (const_tree); extern bool function_parameter_expanded_from_pack_p (tree, tree); extern tree make_pack_expansion (tree, tsubst_flags_t = tf_warning_or_error); extern bool check_for_bare_parameter_packs (tree, location_t = UNKNOWN_LOCATION); extern tree build_template_info (tree, tree); extern tree get_template_info (const_tree); extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree); extern int template_class_depth (tree); extern int is_specialization_of (tree, tree); extern bool is_specialization_of_friend (tree, tree); extern tree get_pattern_parm (tree, tree); extern int comp_template_args (tree, tree, tree * = NULL, tree * = NULL, bool = false); extern int template_args_equal (tree, tree, bool = false); extern tree maybe_process_partial_specialization (tree); extern tree most_specialized_instantiation (tree); extern tree most_specialized_partial_spec (tree, tsubst_flags_t); extern void print_candidates (tree); extern void instantiate_pending_templates (int); extern tree tsubst_default_argument (tree, int, tree, tree, tsubst_flags_t); extern tree tsubst (tree, tree, tsubst_flags_t, tree); extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree, bool, bool); extern tree tsubst_expr (tree, tree, tsubst_flags_t, tree, bool); extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree); extern tree tsubst_argument_pack (tree, tree, tsubst_flags_t, tree); extern tree tsubst_template_args (tree, tree, tsubst_flags_t, tree); extern tree tsubst_template_arg (tree, tree, tsubst_flags_t, tree); extern tree tsubst_function_parms (tree, tree, tsubst_flags_t, tree); extern tree most_general_template (tree); extern tree get_mostly_instantiated_function_type (tree); extern bool problematic_instantiation_changed (void); extern void record_last_problematic_instantiation (void); extern struct tinst_level 
*current_instantiation(void); extern bool instantiating_current_function_p (void); extern tree maybe_get_template_decl_from_type_decl (tree); extern int processing_template_parmlist; extern bool dependent_type_p (tree); extern bool dependent_scope_p (tree); extern bool any_dependent_template_arguments_p (const_tree); extern bool any_erroneous_template_args_p (const_tree); extern bool dependent_template_p (tree); extern bool dependent_template_id_p (tree, tree); extern bool type_dependent_expression_p (tree); extern bool type_dependent_object_expression_p (tree); extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *); extern bool any_type_dependent_elements_p (const_tree); extern bool type_dependent_expression_p_push (tree); extern bool value_dependent_expression_p (tree); extern bool instantiation_dependent_expression_p (tree); extern bool instantiation_dependent_uneval_expression_p (tree); extern bool any_value_dependent_elements_p (const_tree); extern bool dependent_omp_for_p (tree, tree, tree, tree); extern tree resolve_typename_type (tree, bool); extern tree template_for_substitution (tree); extern tree build_non_dependent_expr (tree); extern void make_args_non_dependent (vec<tree, va_gc> *); extern bool reregister_specialization (tree, tree, tree); extern tree instantiate_non_dependent_expr (tree); extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t); extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t); extern tree instantiate_non_dependent_or_null (tree); extern bool variable_template_specialization_p (tree); extern bool alias_type_or_template_p (tree); enum { nt_opaque = false, nt_transparent = true }; extern tree alias_template_specialization_p (const_tree, bool); extern tree dependent_alias_template_spec_p (const_tree, bool); extern bool template_parm_object_p (const_tree); extern tree tparm_object_argument (tree); extern bool explicit_class_specialization_p (tree); extern bool push_tinst_level 
(tree); extern bool push_tinst_level_loc (tree, location_t); extern void pop_tinst_level (void); extern struct tinst_level *outermost_tinst_level(void); extern void init_template_processing (void); extern void print_template_statistics (void); bool template_template_parameter_p (const_tree); bool template_type_parameter_p (const_tree); extern bool primary_template_specialization_p (const_tree); extern tree get_primary_template_innermost_parameters (const_tree); extern tree get_template_parms_at_level (tree, int); extern tree get_template_innermost_arguments (const_tree); extern tree get_template_argument_pack_elems (const_tree); extern tree get_function_template_decl (const_tree); extern tree resolve_nondeduced_context (tree, tsubst_flags_t); extern tree resolve_nondeduced_context_or_error (tree, tsubst_flags_t); extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val); extern tree coerce_template_parms (tree, tree, tree); extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t); extern tree canonicalize_type_argument (tree, tsubst_flags_t); extern void register_local_specialization (tree, tree); extern tree retrieve_local_specialization (tree); extern tree extract_fnparm_pack (tree, tree *); extern tree template_parm_to_arg (tree); extern tree dguide_name (tree); extern bool dguide_name_p (tree); extern bool deduction_guide_p (const_tree); extern bool copy_guide_p (const_tree); extern bool template_guide_p (const_tree); extern void store_explicit_specifier (tree, tree); extern tree add_outermost_template_args (tree, tree); /* in rtti.c */ /* A vector of all tinfo decls that haven't been emitted yet. 
*/ extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls; extern void init_rtti_processing (void); extern tree build_typeid (tree, tsubst_flags_t); extern tree get_tinfo_decl (tree); extern tree get_typeid (tree, tsubst_flags_t); extern tree build_headof (tree); extern tree build_dynamic_cast (location_t, tree, tree, tsubst_flags_t); extern void emit_support_tinfos (void); extern bool emit_tinfo_decl (tree); /* in search.c */ extern bool accessible_base_p (tree, tree, bool); extern tree lookup_base (tree, tree, base_access, base_kind *, tsubst_flags_t); extern tree dcast_base_hint (tree, tree); extern int accessible_p (tree, tree, bool); extern int accessible_in_template_p (tree, tree); extern tree lookup_field (tree, tree, int, bool); extern tree lookup_fnfields (tree, tree, int); extern tree lookup_member (tree, tree, int, bool, tsubst_flags_t, access_failure_info *afi = NULL); extern tree lookup_member_fuzzy (tree, tree, bool); extern tree locate_field_accessor (tree, tree, bool); extern int look_for_overrides (tree, tree); extern void get_pure_virtuals (tree); extern void maybe_suppress_debug_info (tree); extern void note_debug_info_needed (tree); extern tree current_scope (void); extern int at_function_scope_p (void); extern bool at_class_scope_p (void); extern bool at_namespace_scope_p (void); extern tree context_for_name_lookup (tree); extern tree lookup_conversions (tree); extern tree binfo_from_vbase (tree); extern tree binfo_for_vbase (tree, tree); extern tree look_for_overrides_here (tree, tree); #define dfs_skip_bases ((tree)1) extern tree dfs_walk_all (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree dfs_walk_once (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree binfo_via_virtual (tree, tree); extern bool binfo_direct_p (tree); extern tree build_baselink (tree, tree, tree, tree); extern tree adjust_result_of_qualified_name_lookup (tree, tree, tree); extern tree copied_binfo (tree, tree); extern 
tree original_binfo (tree, tree);
extern int shared_member_p (tree);
extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ());
extern bool maybe_check_overriding_exception_spec (tree, tree);

/* The representation of a deferred access check.  NOTE(review): the
   surrounding push_deferring_access_checks / perform_deferred_access_checks
   declarations suggest these records are queued while checks are deferred
   and replayed later -- confirm in semantics.c.  */
struct GTY(()) deferred_access_check {
  /* The base class in which the declaration is referenced. */
  tree binfo;
  /* The declaration whose access must be checked.  */
  tree decl;
  /* The declaration that should be used in the error message.  */
  tree diag_decl;
  /* The location of this access.  */
  location_t loc;
};

/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *, tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree, tsubst_flags_t, access_failure_info *afi = NULL);

/* RAII sentinel to ensure that deferred access checks are popped before
   a function returns.
*/
class deferring_access_check_sentinel
{
public:
  /* On construction, push a deferring context of KIND
     (defaults to dk_deferred).  */
  deferring_access_check_sentinel (enum deferring_kind kind = dk_deferred)
  { push_deferring_access_checks (kind); }
  /* On scope exit, pop the context pushed by the constructor.  */
  ~deferring_access_check_sentinel ()
  { pop_deferring_access_checks (); }
};

extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern tree finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool, unsigned short);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool, unsigned short);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool, unsigned short);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void
finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool is_this_parameter (tree);

/* NOTE(review): bit-flag values; the adjacent begin_compound_stmt
   (unsigned int) is presumably the consumer -- confirm in semantics.c.  */
enum {
  BCS_NORMAL = 0,
  BCS_NO_SCOPE = 1,
  BCS_TRY_BLOCK = 2,
  BCS_FN_BODY = 4,
  BCS_TRANSACTION = 8
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
extern tree finish_asm_stmt (location_t, int, tree, tree, tree, tree, tree, bool);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern cp_expr finish_parenthesized_expr (cp_expr);
extern tree force_paren_expr (tree, bool = false);

/* Shorthand for force_paren_expr (t, true).  NOTE(review): the "uneval"
   name suggests the bool selects unevaluated-operand handling -- confirm
   against force_paren_expr's definition.  */
inline tree
force_paren_expr_uneval (tree t)
{
  return force_paren_expr (t, true);
}

extern tree maybe_undo_parenthesized_ref (tree);
extern tree maybe_strip_ref_conversion (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *, tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool, bool, tsubst_flags_t);
extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error);
extern cp_expr finish_increment_expr (cp_expr, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr, tsubst_flags_t);

/* Whether this call to finish_compound_literal represents a C++11 functional
   cast or a C99 compound literal.
*/ enum fcl_t { fcl_functional, fcl_c99 }; extern tree finish_compound_literal (tree, tree, tsubst_flags_t, fcl_t = fcl_functional); extern tree finish_fname (tree); extern void finish_translation_unit (void); extern tree finish_template_type_parm (tree, tree); extern tree finish_template_template_parm (tree, tree); extern tree begin_class_definition (tree); extern void finish_template_decl (tree); extern tree finish_template_type (tree, tree, int); extern tree finish_base_specifier (tree, tree, bool); extern void finish_member_declaration (tree); extern bool outer_automatic_var_p (tree); extern tree process_outer_var_ref (tree, tsubst_flags_t, bool force_use = false); extern cp_expr finish_id_expression (tree, tree, tree, cp_id_kind *, bool, bool, bool *, bool, bool, bool, bool, const char **, location_t); extern tree finish_typeof (tree); extern tree finish_underlying_type (tree); extern tree calculate_bases (tree, tsubst_flags_t); extern tree finish_bases (tree, bool); extern tree calculate_direct_bases (tree, tsubst_flags_t); extern tree finish_offsetof (tree, tree, location_t); extern void finish_decl_cleanup (tree, tree); extern void finish_eh_cleanup (tree); extern void emit_associated_thunks (tree); extern void finish_mem_initializers (tree); extern tree check_template_template_default_arg (tree); extern bool expand_or_defer_fn_1 (tree); extern void expand_or_defer_fn (tree); extern void add_typedef_to_current_template_for_access_check (tree, tree, location_t); extern void check_accessibility_of_qualified_id (tree, tree, tree); extern tree finish_qualified_id_expr (tree, tree, bool, bool, bool, bool, tsubst_flags_t); extern void simplify_aggr_init_expr (tree *); extern void finalize_nrv (tree *, tree, tree); extern tree omp_reduction_id (enum tree_code, tree, tree); extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *); extern void cp_check_omp_declare_reduction (tree); extern void finish_omp_declare_simd_methods (tree); extern tree 
finish_omp_clauses (tree, enum c_omp_region_type); extern tree push_omp_privatization_clauses (bool); extern void pop_omp_privatization_clauses (tree); extern void save_omp_privatization_clauses (vec<tree> &); extern void restore_omp_privatization_clauses (vec<tree> &); extern void finish_omp_threadprivate (tree); extern tree begin_omp_structured_block (void); extern tree finish_omp_structured_block (tree); extern tree finish_oacc_data (tree, tree); extern tree finish_oacc_host_data (tree, tree); extern tree finish_omp_construct (enum tree_code, tree, tree); extern tree begin_omp_parallel (void); extern tree finish_omp_parallel (tree, tree); extern tree begin_omp_task (void); extern tree finish_omp_task (tree, tree); extern tree finish_omp_for (location_t, enum tree_code, tree, tree, tree, tree, tree, tree, tree, vec<tree> *, tree); extern tree finish_omp_for_block (tree, tree); extern void finish_omp_atomic (location_t, enum tree_code, enum tree_code, tree, tree, tree, tree, tree, tree, enum omp_memory_order); extern void finish_omp_barrier (void); extern void finish_omp_depobj (location_t, tree, enum omp_clause_depend_kind, tree); extern void finish_omp_flush (int); extern void finish_omp_taskwait (void); extern void finish_omp_taskyield (void); extern void finish_omp_cancel (tree); extern void finish_omp_cancellation_point (tree); extern tree omp_privatize_field (tree, bool); extern tree begin_transaction_stmt (location_t, tree *, int); extern void finish_transaction_stmt (tree, tree, int, tree); extern tree build_transaction_expr (location_t, tree, int, tree); extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool, bool); extern tree baselink_for_fns (tree); extern void finish_static_assert (tree, tree, location_t, bool); extern tree finish_decltype_type (tree, bool, tsubst_flags_t); extern tree finish_trait_expr (location_t, enum cp_trait_kind, tree, tree); extern tree build_lambda_expr (void); extern tree build_lambda_object (tree); extern tree 
begin_lambda_type (tree); extern tree lambda_capture_field_type (tree, bool, bool); extern tree lambda_return_type (tree); extern tree lambda_proxy_type (tree); extern tree lambda_function (tree); extern void apply_deduced_return_type (tree, tree); extern tree add_capture (tree, tree, tree, bool, bool); extern tree add_default_capture (tree, tree, tree); extern void insert_capture_proxy (tree); extern void insert_pending_capture_proxies (void); extern bool is_capture_proxy (tree); extern bool is_normal_capture_proxy (tree); extern bool is_constant_capture_proxy (tree); extern void register_capture_members (tree); extern tree lambda_expr_this_capture (tree, int); extern void maybe_generic_this_capture (tree, tree); extern tree maybe_resolve_dummy (tree, bool); extern tree current_nonlambda_function (void); extern tree nonlambda_method_basetype (void); extern tree current_nonlambda_scope (void); extern tree current_lambda_expr (void); extern bool generic_lambda_fn_p (tree); extern tree do_dependent_capture (tree, bool = false); extern bool lambda_fn_in_template_p (tree); extern void maybe_add_lambda_conv_op (tree); extern bool is_lambda_ignored_entity (tree); extern bool lambda_static_thunk_p (tree); extern tree finish_builtin_launder (location_t, tree, tsubst_flags_t); extern tree cp_build_vec_convert (tree, location_t, tree, tsubst_flags_t); extern void start_lambda_scope (tree); extern void record_lambda_scope (tree); extern void record_null_lambda_scope (tree); extern void finish_lambda_scope (void); extern tree start_lambda_function (tree fn, tree lambda_expr); extern void finish_lambda_function (tree body); /* in tree.c */ extern int cp_tree_operand_length (const_tree); extern int cp_tree_code_length (enum tree_code); extern void cp_free_lang_data (tree t); extern tree force_target_expr (tree, tree, tsubst_flags_t); extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t); extern void lang_check_failed (const char *, int, const char *) 
ATTRIBUTE_NORETURN ATTRIBUTE_COLD; extern tree stabilize_expr (tree, tree *); extern void stabilize_call (tree, tree *); extern bool stabilize_init (tree, tree *); extern tree add_stmt_to_compound (tree, tree); extern void init_tree (void); extern bool pod_type_p (const_tree); extern bool layout_pod_type_p (const_tree); extern bool std_layout_type_p (const_tree); extern bool trivial_type_p (const_tree); extern bool trivially_copyable_p (const_tree); extern bool type_has_unique_obj_representations (const_tree); extern bool scalarish_type_p (const_tree); extern bool structural_type_p (tree, bool = false); extern bool type_has_nontrivial_default_init (const_tree); extern bool type_has_nontrivial_copy_init (const_tree); extern void maybe_warn_parm_abi (tree, location_t); extern bool class_tmpl_impl_spec_p (const_tree); extern int zero_init_p (const_tree); extern bool zero_init_expr_p (tree); extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree); extern bool check_abi_tag_args (tree, tree); extern tree strip_typedefs (tree, bool * = NULL, unsigned int = 0); extern tree strip_typedefs_expr (tree, bool * = NULL, unsigned int = 0); extern tree copy_binfo (tree, tree, tree, tree *, int); extern int member_p (const_tree); extern cp_lvalue_kind real_lvalue_p (const_tree); extern cp_lvalue_kind lvalue_kind (const_tree); extern bool glvalue_p (const_tree); extern bool obvalue_p (const_tree); extern bool xvalue_p (const_tree); extern bool bitfield_p (const_tree); extern tree cp_stabilize_reference (tree); extern bool builtin_valid_in_constant_expr_p (const_tree); extern tree build_min (enum tree_code, tree, ...); extern tree build_min_nt_loc (location_t, enum tree_code, ...); extern tree build_min_non_dep (enum tree_code, tree, ...); extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...); extern tree build_min_nt_call_vec (tree, vec<tree, va_gc> *); extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *); extern 
vec<tree, va_gc>* vec_copy_and_insert (vec<tree, va_gc>*, tree, unsigned); extern tree build_cplus_new (tree, tree, tsubst_flags_t); extern tree build_local_temp (tree); extern bool is_local_temp (tree); extern tree build_aggr_init_expr (tree, tree); extern tree get_target_expr (tree); extern tree get_target_expr_sfinae (tree, tsubst_flags_t); extern tree build_cplus_array_type (tree, tree); extern tree build_array_of_n_type (tree, int); extern bool array_of_runtime_bound_p (tree); extern bool vla_type_p (tree); extern tree build_array_copy (tree); extern tree build_vec_init_expr (tree, tree, tsubst_flags_t); extern void diagnose_non_constexpr_vec_init (tree); extern tree hash_tree_cons (tree, tree, tree); extern tree hash_tree_chain (tree, tree); extern tree build_qualified_name (tree, tree, tree, bool); extern tree build_ref_qualified_type (tree, cp_ref_qualifier); inline tree ovl_first (tree) ATTRIBUTE_PURE; extern tree ovl_make (tree fn, tree next = NULL_TREE); extern tree ovl_insert (tree fn, tree maybe_ovl, bool using_p = false); extern tree ovl_skip_hidden (tree) ATTRIBUTE_PURE; extern void lookup_mark (tree lookup, bool val); extern tree lookup_add (tree fns, tree lookup); extern tree lookup_maybe_add (tree fns, tree lookup, bool deduping); extern int is_overloaded_fn (tree) ATTRIBUTE_PURE; extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE; extern tree dependent_name (tree); extern tree maybe_get_fns (tree) ATTRIBUTE_PURE; extern tree get_fns (tree) ATTRIBUTE_PURE; extern tree get_first_fn (tree) ATTRIBUTE_PURE; extern tree ovl_scope (tree); extern const char *cxx_printable_name (tree, int); extern const char *cxx_printable_name_translate (tree, int); extern tree canonical_eh_spec (tree); extern tree build_cp_fntype_variant (tree, cp_ref_qualifier, tree, bool); extern tree build_exception_variant (tree, tree); extern tree bind_template_template_parm (tree, tree); extern tree array_type_nelts_total (tree); extern tree array_type_nelts_top (tree); extern 
bool array_of_unknown_bound_p (const_tree); extern tree break_out_target_exprs (tree, bool = false); extern tree build_ctor_subob_ref (tree, tree, tree); extern tree replace_placeholders (tree, tree, bool * = NULL); extern bool find_placeholders (tree); extern tree get_type_decl (tree); extern tree decl_namespace_context (tree); extern bool decl_anon_ns_mem_p (const_tree); extern tree lvalue_type (tree); extern tree error_type (tree); extern int varargs_function_p (const_tree); extern bool cp_tree_equal (tree, tree); extern tree no_linkage_check (tree, bool); extern void debug_binfo (tree); extern tree build_dummy_object (tree); extern tree maybe_dummy_object (tree, tree *); extern int is_dummy_object (const_tree); extern const struct attribute_spec cxx_attribute_table[]; extern tree make_ptrmem_cst (tree, tree); extern tree cp_build_type_attribute_variant (tree, tree); extern tree cp_build_reference_type (tree, bool); extern tree move (tree); extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t); #define cp_build_qualified_type(TYPE, QUALS) \ cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error) extern bool cv_qualified_p (const_tree); extern tree cv_unqualified (tree); extern special_function_kind special_function_p (const_tree); extern special_function_kind special_memfn_p (const_tree); extern int count_trees (tree); extern int char_type_p (tree); extern void verify_stmt_tree (tree); extern linkage_kind decl_linkage (tree); extern duration_kind decl_storage_duration (tree); extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn, void*, hash_set<tree> *); #define cp_walk_tree(tp,func,data,pset) \ walk_tree_1 (tp, func, data, pset, cp_walk_subtrees) #define cp_walk_tree_without_duplicates(tp,func,data) \ walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees) extern tree rvalue (tree); extern tree convert_bitfield_to_declared_type (tree); extern tree cp_save_expr (tree); extern bool 
cast_valid_in_integral_constant_expression_p (tree); extern bool cxx_type_hash_eq (const_tree, const_tree); extern tree cxx_copy_lang_qualifiers (const_tree, const_tree); extern void cxx_print_statistics (void); extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t); /* in ptree.c */ extern void cxx_print_xnode (FILE *, tree, int); extern void cxx_print_decl (FILE *, tree, int); extern void cxx_print_type (FILE *, tree, int); extern void cxx_print_identifier (FILE *, tree, int); extern void cxx_print_error_function (diagnostic_context *, const char *, struct diagnostic_info *); /* in typeck.c */ /* Says how we should behave when comparing two arrays one of which has unknown bounds. */ enum compare_bounds_t { bounds_none, bounds_either, bounds_first }; extern bool cxx_mark_addressable (tree, bool = false); extern int string_conv_p (const_tree, const_tree, int); extern tree cp_truthvalue_conversion (tree, tsubst_flags_t); extern tree contextual_conv_bool (tree, tsubst_flags_t); extern tree condition_conversion (tree); extern tree require_complete_type (tree); extern tree require_complete_type_sfinae (tree, tsubst_flags_t); extern tree complete_type (tree); extern tree complete_type_or_else (tree, tree); extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t); inline bool type_unknown_p (const_tree); enum { ce_derived, ce_type, ce_normal, ce_exact }; extern bool comp_except_specs (const_tree, const_tree, int); extern bool comptypes (tree, tree, int); extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree); extern bool similar_type_p (tree, tree); extern bool compparms (const_tree, const_tree); extern int comp_cv_qualification (const_tree, const_tree); extern int comp_cv_qualification (int, int); extern int comp_cv_qual_signature (tree, tree); extern tree cxx_sizeof_or_alignof_expr (location_t, tree, enum tree_code, bool); extern tree cxx_sizeof_or_alignof_type (location_t, tree, enum tree_code, bool, bool); extern tree 
cxx_alignas_expr (tree); extern tree cxx_sizeof_nowarn (tree); extern tree is_bitfield_expr_with_lowered_type (const_tree); extern tree unlowered_expr_type (const_tree); extern tree decay_conversion (tree, tsubst_flags_t, bool = true); extern tree build_class_member_access_expr (cp_expr, tree, tree, bool, tsubst_flags_t); extern tree finish_class_member_access_expr (cp_expr, tree, bool, tsubst_flags_t); extern tree lookup_destructor (tree, tree, tree, tsubst_flags_t); extern tree build_x_indirect_ref (location_t, tree, ref_operator, tsubst_flags_t); extern tree cp_build_indirect_ref (location_t, tree, ref_operator, tsubst_flags_t); extern tree cp_build_fold_indirect_ref (tree); extern tree build_array_ref (location_t, tree, tree); extern tree cp_build_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t); extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...) ATTRIBUTE_SENTINEL; extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **, tsubst_flags_t, tree = NULL_TREE); extern tree build_x_binary_op (const op_location_t &, enum tree_code, tree, enum tree_code, tree, enum tree_code, tree *, tsubst_flags_t); inline tree build_x_binary_op (const op_location_t &loc, enum tree_code code, tree arg1, tree arg2, tsubst_flags_t complain) { return build_x_binary_op (loc, code, arg1, TREE_CODE (arg1), arg2, TREE_CODE (arg2), NULL, complain); } extern tree build_x_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree build_x_unary_op (location_t, enum tree_code, cp_expr, tsubst_flags_t); extern tree cp_build_addressof (location_t, tree, tsubst_flags_t); extern tree cp_build_addr_expr (tree, tsubst_flags_t); extern tree cp_build_unary_op (enum tree_code, tree, bool, tsubst_flags_t); extern tree genericize_compound_lvalue (tree); extern tree unary_complex_lvalue (enum tree_code, tree); extern tree build_x_conditional_expr (location_t, tree, tree, tree, tsubst_flags_t); 
extern tree build_x_compound_expr_from_list (tree, expr_list_kind, tsubst_flags_t); extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *, const char *, tsubst_flags_t); extern tree build_x_compound_expr (location_t, tree, tree, tsubst_flags_t); extern tree build_compound_expr (location_t, tree, tree); extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t); extern tree build_static_cast (location_t, tree, tree, tsubst_flags_t); extern tree build_reinterpret_cast (location_t, tree, tree, tsubst_flags_t); extern tree build_const_cast (location_t, tree, tree, tsubst_flags_t); extern tree build_c_cast (location_t, tree, tree); extern cp_expr build_c_cast (location_t loc, tree type, cp_expr expr); extern tree cp_build_c_cast (location_t, tree, tree, tsubst_flags_t); extern cp_expr build_x_modify_expr (location_t, tree, enum tree_code, tree, tsubst_flags_t); extern tree cp_build_modify_expr (location_t, tree, enum tree_code, tree, tsubst_flags_t); extern tree convert_for_initialization (tree, tree, tree, int, impl_conv_rhs, tree, int, tsubst_flags_t); extern int comp_ptr_ttypes (tree, tree); extern bool comp_ptr_ttypes_const (tree, tree, compare_bounds_t); extern bool error_type_p (const_tree); extern bool ptr_reasonably_similar (const_tree, const_tree); extern tree build_ptrmemfunc (tree, tree, int, bool, tsubst_flags_t); extern int cp_type_quals (const_tree); extern int type_memfn_quals (const_tree); extern cp_ref_qualifier type_memfn_rqual (const_tree); extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier = REF_QUAL_NONE); extern bool cp_has_mutable_p (const_tree); extern bool at_least_as_qualified_p (const_tree, const_tree); extern void cp_apply_type_quals_to_decl (int, tree); extern tree build_ptrmemfunc1 (tree, tree, tree); extern void expand_ptrmemfunc_cst (tree, tree *, tree *); extern tree type_after_usual_arithmetic_conversions (tree, tree); extern tree common_pointer_type (tree, tree); extern tree composite_pointer_type (const 
op_location_t &, tree, tree, tree, tree, composite_pointer_operation, tsubst_flags_t); extern tree merge_types (tree, tree); extern tree strip_array_domain (tree); extern tree check_return_expr (tree, bool *); extern tree spaceship_type (tree, tsubst_flags_t = tf_warning_or_error); extern tree genericize_spaceship (tree, tree, tree); extern tree cp_build_binary_op (const op_location_t &, enum tree_code, tree, tree, tsubst_flags_t); extern tree build_x_vec_perm_expr (location_t, tree, tree, tree, tsubst_flags_t); #define cxx_sizeof(T) cxx_sizeof_or_alignof_type (input_location, T, SIZEOF_EXPR, false, true) extern tree build_simple_component_ref (tree, tree); extern tree build_ptrmemfunc_access_expr (tree, tree); extern tree build_address (tree); extern tree build_nop (tree, tree); extern tree non_reference (tree); extern tree lookup_anon_field (tree, tree); extern bool invalid_nonstatic_memfn_p (location_t, tree, tsubst_flags_t); extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t); extern tree convert_ptrmem (tree, tree, bool, bool, tsubst_flags_t); extern int lvalue_or_else (tree, enum lvalue_use, tsubst_flags_t); extern void check_template_keyword (tree); extern bool check_raw_literal_operator (const_tree decl); extern bool check_literal_operator_args (const_tree, bool *, bool *); extern void maybe_warn_about_useless_cast (location_t, tree, tree, tsubst_flags_t); extern tree cp_perform_integral_promotions (tree, tsubst_flags_t); extern tree finish_left_unary_fold_expr (tree, int); extern tree finish_right_unary_fold_expr (tree, int); extern tree finish_binary_fold_expr (tree, tree, int); extern bool treat_lvalue_as_rvalue_p (tree, bool); extern bool decl_in_std_namespace_p (tree); /* in typeck2.c */ extern void require_complete_eh_spec_types (tree, tree); extern void cxx_incomplete_type_diagnostic (location_t, const_tree, const_tree, diagnostic_t); inline location_t cp_expr_loc_or_loc (const_tree t, location_t or_loc) { location_t loc = 
cp_expr_location (t); if (loc == UNKNOWN_LOCATION) loc = or_loc; return loc; } inline location_t cp_expr_loc_or_input_loc (const_tree t) { return cp_expr_loc_or_loc (t, input_location); } inline void cxx_incomplete_type_diagnostic (const_tree value, const_tree type, diagnostic_t diag_kind) { cxx_incomplete_type_diagnostic (cp_expr_loc_or_input_loc (value), value, type, diag_kind); } extern void cxx_incomplete_type_error (location_t, const_tree, const_tree); inline void cxx_incomplete_type_error (const_tree value, const_tree type) { cxx_incomplete_type_diagnostic (value, type, DK_ERROR); } extern void cxx_incomplete_type_inform (const_tree); extern tree error_not_base_type (tree, tree); extern tree binfo_or_else (tree, tree); extern void cxx_readonly_error (location_t, tree, enum lvalue_use); extern void complete_type_check_abstract (tree); extern int abstract_virtuals_error (tree, tree); extern int abstract_virtuals_error (abstract_class_use, tree); extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t); extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t); extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int); extern tree split_nonconstant_init (tree, tree); extern bool check_narrowing (tree, tree, tsubst_flags_t, bool = false); extern bool ordinary_char_type_p (tree); extern tree digest_init (tree, tree, tsubst_flags_t); extern tree digest_init_flags (tree, tree, int, tsubst_flags_t); extern tree digest_nsdmi_init (tree, tree, tsubst_flags_t); extern tree build_scoped_ref (tree, tree, tree *); extern tree build_x_arrow (location_t, tree, tsubst_flags_t); extern tree build_m_component_ref (tree, tree, tsubst_flags_t); extern tree build_functional_cast (location_t, tree, tree, tsubst_flags_t); extern tree add_exception_specifier (tree, tree, tsubst_flags_t); extern tree merge_exception_specifiers (tree, tree); /* in mangle.c */ extern void init_mangle (void); extern void mangle_decl (tree); extern const 
char *mangle_type_string (tree); extern tree mangle_typeinfo_for_type (tree); extern tree mangle_typeinfo_string_for_type (tree); extern tree mangle_vtbl_for_type (tree); extern tree mangle_vtt_for_type (tree); extern tree mangle_ctor_vtbl_for_type (tree, tree); extern tree mangle_thunk (tree, int, tree, tree, tree); extern tree mangle_guard_variable (tree); extern tree mangle_tls_init_fn (tree); extern tree mangle_tls_wrapper_fn (tree); extern bool decl_tls_wrapper_p (tree); extern tree mangle_ref_init_variable (tree); extern tree mangle_template_parm_object (tree); extern char * get_mangled_vtable_map_var_name (tree); extern bool mangle_return_type_p (tree); extern tree mangle_decomp (tree, vec<tree> &); /* in dump.c */ extern bool cp_dump_tree (void *, tree); /* In cp/cp-objcp-common.c. */ extern alias_set_type cxx_get_alias_set (tree); extern bool cxx_warn_unused_global_decl (const_tree); extern size_t cp_tree_size (enum tree_code); extern bool cp_var_mod_type_p (tree, tree); extern void cxx_initialize_diagnostics (diagnostic_context *); extern int cxx_types_compatible_p (tree, tree); extern bool cxx_block_may_fallthru (const_tree); /* in cp-gimplify.c */ extern int cp_gimplify_expr (tree *, gimple_seq *, gimple_seq *); extern void cp_genericize (tree); extern bool cxx_omp_const_qual_no_mutable (tree); extern enum omp_clause_default_kind cxx_omp_predetermined_sharing_1 (tree); extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree); extern tree cxx_omp_clause_default_ctor (tree, tree, tree); extern tree cxx_omp_clause_copy_ctor (tree, tree, tree); extern tree cxx_omp_clause_assign_op (tree, tree, tree); extern tree cxx_omp_clause_dtor (tree, tree); extern void cxx_omp_finish_clause (tree, gimple_seq *); extern bool cxx_omp_privatize_by_reference (const_tree); extern bool cxx_omp_disregard_value_expr (tree, bool); extern void cp_fold_function (tree); extern tree cp_fold_maybe_rvalue (tree, bool); extern tree cp_fold_rvalue (tree); extern tree 
cp_fully_fold (tree); extern tree cp_fully_fold_init (tree); extern void clear_fold_cache (void); extern tree lookup_hotness_attribute (tree); extern tree process_stmt_hotness_attribute (tree, location_t); extern bool simple_empty_class_p (tree, tree, tree_code); extern tree fold_builtin_source_location (location_t); /* in name-lookup.c */ extern tree strip_using_decl (tree); /* Tell the binding oracle what kind of binding we are looking for. */ enum cp_oracle_request { CP_ORACLE_IDENTIFIER }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. */ typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier); extern cp_binding_oracle_function *cp_binding_oracle; /* Set during diagnostics to record the failed constraint. This is a TREE_LIST whose VALUE is the constraint and whose PURPOSE are the instantiation arguments Defined in pt.c. */ extern tree current_failed_constraint; /* An RAII class to manage the failed constraint. 
*/ struct diagnosing_failed_constraint { diagnosing_failed_constraint (tree, tree, bool); ~diagnosing_failed_constraint (); static bool replay_errors_p (); bool diagnosing_error; }; /* in constraint.cc */ extern cp_expr finish_constraint_or_expr (location_t, cp_expr, cp_expr); extern cp_expr finish_constraint_and_expr (location_t, cp_expr, cp_expr); extern cp_expr finish_constraint_primary_expr (cp_expr); extern tree finish_concept_definition (cp_expr, tree); extern tree combine_constraint_expressions (tree, tree); extern tree append_constraint (tree, tree); extern tree get_constraints (const_tree); extern void set_constraints (tree, tree); extern void remove_constraints (tree); extern tree current_template_constraints (void); extern tree associate_classtype_constraints (tree); extern tree build_constraints (tree, tree); extern tree maybe_substitute_reqs_for (tree, const_tree); extern tree get_template_head_requirements (tree); extern tree get_trailing_function_requirements (tree); extern tree get_shorthand_constraints (tree); extern tree build_concept_id (tree); extern tree build_type_constraint (tree, tree, tsubst_flags_t); extern tree build_concept_check (tree, tree, tsubst_flags_t); extern tree build_concept_check (tree, tree, tree, tsubst_flags_t); extern tree_pair finish_type_constraints (tree, tree, tsubst_flags_t); extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE); extern void placeholder_extract_concept_and_args (tree, tree&, tree&); extern bool equivalent_placeholder_constraints (tree, tree); extern hashval_t hash_placeholder_constraint (tree); extern bool deduce_constrained_parameter (tree, tree&, tree&); extern tree resolve_constraint_check (tree); extern tree check_function_concept (tree); extern tree finish_template_introduction (tree, tree, location_t loc); extern bool valid_requirements_p (tree); extern tree finish_concept_name (tree); extern tree finish_shorthand_constraint (tree, tree); extern tree finish_requires_expr 
(location_t, tree, tree); extern tree finish_simple_requirement (location_t, tree); extern tree finish_type_requirement (location_t, tree); extern tree finish_compound_requirement (location_t, tree, tree, bool); extern tree finish_nested_requirement (location_t, tree); extern void check_constrained_friend (tree, tree); extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree); extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree); extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree); extern tree tsubst_parameter_mapping (tree, tree, tsubst_flags_t, tree); extern tree get_mapped_args (tree); struct processing_constraint_expression_sentinel { processing_constraint_expression_sentinel (); ~processing_constraint_expression_sentinel (); }; extern bool processing_constraint_expression_p (); extern tree unpack_concept_check (tree); extern tree evaluate_concept_check (tree, tsubst_flags_t); extern tree satisfy_constraint_expression (tree); extern bool constraints_satisfied_p (tree); extern bool constraints_satisfied_p (tree, tree); extern void clear_satisfaction_cache (); extern bool* lookup_subsumption_result (tree, tree); extern bool save_subsumption_result (tree, tree, bool); extern tree find_template_parameters (tree, tree); extern bool equivalent_constraints (tree, tree); extern bool equivalently_constrained (tree, tree); extern bool subsumes_constraints (tree, tree); extern bool strictly_subsumes (tree, tree, tree); extern bool weakly_subsumes (tree, tree, tree); extern int more_constrained (tree, tree); extern bool at_least_as_constrained (tree, tree); extern bool constraints_equivalent_p (tree, tree); extern bool atomic_constraints_identical_p (tree, tree); extern hashval_t iterative_hash_constraint (tree, hashval_t); extern hashval_t hash_atomic_constraint (tree); extern void diagnose_constraints (location_t, tree, tree); /* in logic.cc */ extern bool subsumes (tree, tree); /* In class.c */ extern void 
cp_finish_injected_record_type (tree); /* in vtable-class-hierarchy.c */ extern void vtv_compute_class_hierarchy_transitive_closure (void); extern void vtv_generate_init_routine (void); extern void vtv_save_class_info (tree); extern void vtv_recover_class_info (void); extern void vtv_build_vtable_verify_fndecl (void); /* In constexpr.c */ extern void fini_constexpr (void); extern bool literal_type_p (tree); extern tree register_constexpr_fundef (tree, tree); extern bool is_valid_constexpr_fn (tree, bool); extern bool check_constexpr_ctor_body (tree, tree, bool); extern tree constexpr_fn_retval (tree); extern tree ensure_literal_type_for_constexpr_object (tree); extern bool potential_constant_expression (tree); extern bool is_constant_expression (tree); extern bool is_nondependent_constant_expression (tree); extern bool is_nondependent_static_init_expression (tree); extern bool is_static_init_expression (tree); extern bool potential_rvalue_constant_expression (tree); extern bool require_potential_constant_expression (tree); extern bool require_constant_expression (tree); extern bool require_rvalue_constant_expression (tree); extern bool require_potential_rvalue_constant_expression (tree); extern tree cxx_constant_value (tree, tree = NULL_TREE); extern void cxx_constant_dtor (tree, tree); extern tree cxx_constant_init (tree, tree = NULL_TREE); extern tree maybe_constant_value (tree, tree = NULL_TREE, bool = false, bool = false); extern tree maybe_constant_init (tree, tree = NULL_TREE, bool = false); extern tree fold_non_dependent_expr (tree, tsubst_flags_t = tf_warning_or_error, bool = false, tree = NULL_TREE); extern tree maybe_fold_non_dependent_expr (tree, tsubst_flags_t = tf_warning_or_error); extern tree fold_non_dependent_init (tree, tsubst_flags_t = tf_warning_or_error, bool = false, tree = NULL_TREE); extern tree fold_simple (tree); extern bool reduced_constant_expression_p (tree); extern bool is_instantiation_of_constexpr (tree); extern bool 
var_in_constexpr_fn (tree); extern bool var_in_maybe_constexpr_fn (tree); extern void explain_invalid_constexpr_fn (tree); extern vec<tree> cx_error_context (void); extern tree fold_sizeof_expr (tree); extern void clear_cv_and_fold_caches (bool = true); extern tree unshare_constructor (tree CXX_MEM_STAT_INFO); /* In cp-ubsan.c */ extern void cp_ubsan_maybe_instrument_member_call (tree); extern void cp_ubsan_instrument_member_accesses (tree *); extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree); extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree); extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree); /* In coroutines.cc */ extern tree finish_co_return_stmt (location_t, tree); extern tree finish_co_await_expr (location_t, tree); extern tree finish_co_yield_expr (location_t, tree); extern tree coro_validate_builtin_call (tree, tsubst_flags_t = tf_warning_or_error); extern bool morph_fn_to_coro (tree, tree *, tree *); /* Inline bodies. */ inline tree ovl_first (tree node) { while (TREE_CODE (node) == OVERLOAD) node = OVL_FUNCTION (node); return node; } inline bool type_unknown_p (const_tree expr) { return TREE_TYPE (expr) == unknown_type_node; } inline hashval_t named_decl_hash::hash (const value_type decl) { tree name = OVL_NAME (decl); return name ? IDENTIFIER_HASH_VALUE (name) : 0; } inline bool named_decl_hash::equal (const value_type existing, compare_type candidate) { tree name = OVL_NAME (existing); return candidate == name; } inline bool null_node_p (const_tree expr) { STRIP_ANY_LOCATION_WRAPPER (expr); return expr == null_node; } /* True iff T is a variable template declaration. */ inline bool variable_template_p (tree t) { if (TREE_CODE (t) != TEMPLATE_DECL) return false; if (!PRIMARY_TEMPLATE_P (t)) return false; if (tree r = DECL_TEMPLATE_RESULT (t)) return VAR_P (r); return false; } /* True iff T is a standard concept definition. This will return true for both the template and underlying declaration. 
*/
inline bool
standard_concept_p (tree t)
{
  /* See through a template to the underlying declaration.  */
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);
  return TREE_CODE (t) == CONCEPT_DECL;
}

/* True iff T is a variable concept definition. This will return true for
   both the template and the underlying declaration.  */

inline bool
variable_concept_p (tree t)
{
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);
  /* A variable concept is a variable explicitly declared 'concept'.  */
  return VAR_P (t) && DECL_DECLARED_CONCEPT_P (t);
}

/* True iff T is a function concept definition or an overload set containing
   multiple function concepts. This will return true for both the template
   and the underlying declaration.  */

inline bool
function_concept_p (tree t)
{
  /* For an overload set, classify by its first member.  */
  if (TREE_CODE (t) == OVERLOAD)
    t = OVL_FIRST (t);
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);
  return TREE_CODE (t) == FUNCTION_DECL && DECL_DECLARED_CONCEPT_P (t);
}

/* True iff T is a standard, variable, or function concept.  */

inline bool
concept_definition_p (tree t)
{
  if (t == error_mark_node)
    return false;

  /* Adjust for function concept overloads.  */
  if (TREE_CODE (t) == OVERLOAD)
    t = OVL_FIRST (t);

  /* See through templates.  */
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);

  /* The obvious and easy case.  */
  if (TREE_CODE (t) == CONCEPT_DECL)
    return true;

  /* Definitely not a concept.  */
  if (!VAR_OR_FUNCTION_DECL_P (t))
    return false;
  if (!DECL_LANG_SPECIFIC (t))
    return false;
  return DECL_DECLARED_CONCEPT_P (t);
}

/* Same as above, but for const trees.  */

inline bool
concept_definition_p (const_tree t)
{
  return concept_definition_p (const_cast<tree> (t));
}

/* True if t is an expression that checks a concept.  */

inline bool
concept_check_p (const_tree t)
{
  /* A concept check is a CALL_EXPR (or bare TEMPLATE_ID_EXPR) whose
     template names a concept definition.  */
  if (TREE_CODE (t) == CALL_EXPR)
    t = CALL_EXPR_FN (t);
  if (t && TREE_CODE (t) == TEMPLATE_ID_EXPR)
    return concept_definition_p (TREE_OPERAND (t, 0));
  return false;
}

/* True if t is a "constrained auto" type-specifier.
*/
inline bool
is_constrained_auto (const_tree t)
{
  /* A constrained auto is an 'auto' placeholder carrying constraints
     from a type-constraint (e.g. 'Concept auto').  */
  return is_auto (t) && PLACEHOLDER_TYPE_CONSTRAINTS (t);
}

/* RAII class to push/pop class scope T; if T is not a class, do nothing.  */

struct push_nested_class_guard
{
  /* Whether the constructor pushed a class scope (and hence whether the
     destructor must pop it).  */
  bool push;

  push_nested_class_guard (tree t)
    : push (t && CLASS_TYPE_P (t))
  {
    if (push)
      push_nested_class (t);
  }

  ~push_nested_class_guard ()
  {
    if (push)
      pop_nested_class ();
  }
};

#if CHECKING_P
namespace selftest {
  extern void run_cp_tests (void);

  /* Declarations for specific families of tests within cp,
     by source file, in alphabetical order.  */
  extern void cp_pt_c_tests ();
  extern void cp_tree_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */

/* -- end of C++ */

#endif /* ! GCC_CP_TREE_H */
time_dpotrf-task.c
/** * * @generated d Tue Jan 7 11:45:24 2014 * **/ #define _TYPE double #define _PREC double #define _LAMCH LAPACKE_dlamch_work #include "omp.h" #define _NAME "PLASMA_dpotrf_Tile" /* See Lawn 41 page 120 */ #define _FMULS FMULS_POTRF( N ) #define _FADDS FADDS_POTRF( N ) #include "./timing.inc" static double RunTest(real_Double_t *t_, struct user_parameters* params) { double t; int64_t N = params->matrix_size; int64_t NB = params->blocksize; int check = params->check; int uplo = PlasmaUpper; double check_res = 0; /* Allocate Data */ PLASMA_desc *descA = NULL; double* ptr = malloc(N * N * sizeof(double)); PLASMA_Desc_Create(&descA, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, N, 0, 0, N, N); #pragma omp parallel #pragma omp master plasma_pdplgsy_quark( (double)N, *descA, 51 ); /* Save A for check */ double *A = NULL; if(check) { A = (double*)malloc(N * N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descA, (void*)A, N); } /* PLASMA DPOSV */ START_TIMING(); #pragma omp parallel #pragma omp master plasma_pdpotrf_quark(uplo, *descA); STOP_TIMING(); /* Check the solution */ if ( check ) { PLASMA_desc *descB = NULL; double* ptr = (double*)malloc(N * sizeof(double)); PLASMA_Desc_Create(&descB, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, 1, 0, 0, N, 1); plasma_pdpltmg_seq(* descB, 7672 ); double* B = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)B, N); PLASMA_dpotrs_Tile( uplo, descA, descB ); double* X = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)X, N); check_res = d_check_solution(N, N, 1, A, N, B, X, N); PASTE_CODE_FREE_MATRIX( descB ); free( A ); free( B ); free( X ); } PASTE_CODE_FREE_MATRIX( descA ); return check_res; }
core_number_evaluator.h
#pragma once
#ifndef CORENRVERIFIER_H
#define CORENRVERIFIER_H

#include "gms/algorithms/preprocessing/general.h"
#include "gms/algorithms/preprocessing/verifiers/degeneracy_verifier.h"

// Utilities to measure how well a vertex ordering approximates a
// degeneracy ordering (i.e. how close its induced core number is to
// the graph's true core number).
namespace CoreNumberEvaluator {

/*
CoreNumberOfOrder: Is the core number of the given order
CoreNumber: Is the core number of the graph
relativeError: Relative approximation error (approx - real)/real
faultRate: What is the proportion of vertices in the order which breaks the ordering, i.e. which have a deg > core Num
relativeMeanDifference: What is the average relative difference between an outlier and the core Num
*/
struct CoreNumberInfo {
    size_t coreNumberOfOrder;
    size_t coreNumber;
    double relativeError;
    double faultRate;
    double relativeMeanDifference;

    // Auxiliary function to accumulate results over multiple runs
    // (use together with scaleDown to obtain averages).
    // NOTE(review): 'coreNumber' is deliberately not accumulated --
    // presumably it is the same exact value in every run; confirm.
    void add(CoreNumberInfo other) {
        coreNumberOfOrder += other.coreNumberOfOrder;
        relativeError += other.relativeError;
        faultRate += other.faultRate;
        relativeMeanDifference += other.relativeMeanDifference;
    }

    // Auxiliary function to turn accumulated sums into averages.
    void scaleDown(int numberOfRuns) {
        coreNumberOfOrder = std::round((double)coreNumberOfOrder / (double)numberOfRuns);
        relativeError /= numberOfRuns;
        faultRate /= numberOfRuns;
        relativeMeanDifference /= numberOfRuns;
    }
};

// Convert rankFormat to orderFormat and vice versa (in place).
// The transform is an involution: applying it twice restores the input.
template<class Input = std::vector<NodeId>>
void switchOrderingFormatInPlace(Input &ordering) {
    auto n = ordering.size();
    pvector<NodeId> temp(n);
#pragma omp parallel for
    for(NodeId v = 0; v < n; v++)
        temp[ordering[v]] = v;
#pragma omp parallel for
    for(NodeId i = 0; i < n; i++)
        ordering[i] = temp[i];
}

// Convert rankFormat to orderFormat and vice versa, writing the
// inverted permutation into 'result' (must already have size n).
template<class Input = std::vector<NodeId>, class Output = std::vector<NodeId>>
void switchOrderingFormat(const Input &ordering, Output &result) {
    auto n = ordering.size();
#pragma omp parallel for
    for(NodeId v = 0; v < n; v++)
        result[ordering[v]] = v;
}

// Evaluate the ordering against a known core number.  Walks the order,
// counting for each vertex its "forward degree" (neighbours not yet
// visited); vertices whose forward degree exceeds the true core number
// are the outliers summarized in the returned CoreNumberInfo.
template<bool useRankFormat = false, class Input = std::vector<NodeId>>
CoreNumberInfo evaluateCoreNrAccuracy(const Input &order, const RoaringGraph &graph, size_t actualCoreNumber) {
    if constexpr (useRankFormat) {
        std::vector<NodeId> orderFormat(graph.num_nodes());
        switchOrderingFormat(order, orderFormat);
        return evaluateCoreNrAccuracy<false>(orderFormat, graph, actualCoreNumber);
    } else {
        size_t coreNumber = actualCoreNumber;
        unsigned biggerThanCore = 0;
        unsigned int difAcc = 0;
        RoaringSet visited{};
        for (auto v : order) {
            // forward degree of v: neighbours that come later in the order
            auto deg = graph.out_neigh(v)
                    .difference(visited)
                    .cardinality();
            if (deg > actualCoreNumber) {
                coreNumber = std::max(coreNumber, deg);
                biggerThanCore++;
                difAcc += deg - actualCoreNumber;
            }
            visited.union_inplace(v);
        }
        return {
            coreNumber,
            actualCoreNumber,
            (coreNumber - actualCoreNumber) / (double)actualCoreNumber,
            (double)biggerThanCore / (double)graph.num_nodes(),
            (biggerThanCore == 0) ? 0 : ((double)difAcc / (double)biggerThanCore) / (double)actualCoreNumber};
    }
}

// Get the degeneracy/core number of an order: the maximum forward
// degree encountered while removing vertices in the given order.
template<bool useRankFormat = false, class Input = std::vector<NodeId>>
size_t getCoreNumberOfOrder(const Input &order, const RoaringGraph &graph) {
    if constexpr (useRankFormat) {
        std::vector<NodeId> orderFormat(graph.num_nodes());
        switchOrderingFormat(order, orderFormat);
        return getCoreNumberOfOrder<false>(orderFormat, graph);
    } else {
        RoaringSet visited{};
        size_t coreNumber = 0;
        for (auto v : order) {
            auto deg = graph.out_neigh(v)
                    .difference(visited)
                    .cardinality();
            coreNumber = std::max(coreNumber, deg);
            visited.union_inplace(v);
        }
        return coreNumber;
    }
}

// Convenience overload: computes the reference core number itself via
// Matula's degeneracy ordering, then delegates to the 3-arg version.
template<bool useRankFormat = false, class Input = std::vector<NodeId>>
CoreNumberInfo evaluateCoreNrAccuracy(const Input &order, const RoaringGraph &graph) {
    std::vector<NodeId> matulaDegeneracyOrder(graph.num_nodes());
    PpSequential::getDegeneracyOrderingMatula<RoaringGraph, false>(graph, matulaDegeneracyOrder);
    if constexpr(useRankFormat) {
        std::vector<NodeId> orderFormat(graph.num_nodes());
        switchOrderingFormat(order, orderFormat);
        return evaluateCoreNrAccuracy<false>(orderFormat, graph,
                getCoreNumberOfOrder(matulaDegeneracyOrder, graph));
    } else
        return evaluateCoreNrAccuracy<false>(order, graph,
                getCoreNumberOfOrder(matulaDegeneracyOrder, graph));
}

// Convenience overload for CSR graphs: converts to a RoaringGraph first.
template<bool useRankFormat = false, class Input = std::vector<NodeId>>
CoreNumberInfo evaluateCoreNrAccuracy(const Input &order, const CSRGraph &graph) {
    RoaringGraph rgraph = RoaringGraph::FromCGraph(graph);
    return evaluateCoreNrAccuracy<useRankFormat>(order, rgraph);
}

} // namespace CoreNumberEvaluator

#endif
9591.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #define EXTRALARGE_DATASET #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(4) { #pragma omp for schedule(static, 1) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. 
*/ #pragma omp for schedule(static, 1) for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for schedule(static, 1) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ #pragma omp for schedule(static, 1) for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
FG_vector.h
#ifndef __FG_VECTOR_H__ #define __FG_VECTOR_H__ /* * Copyright 2014 Open Connectome Project (http://openconnecto.me) * Written by Da Zheng (zhengda1936@gmail.com) * * This file is part of FlashGraph. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <memory> #include <set> #include <fstream> #include "graph_engine.h" namespace fg { /** * \brief FlashGraph vector that provides several parallelized methods * when compared to an STL-vector. <br> * **NOTE**: Not an STL-compatible data structure. This vector is also * ideally used with numeric data types. <br> * Methods marked with the keyword **parallel** are parallelized implementations. */ template<class T> class FG_vector { // TODO I might need to split the vector into partitions. std::vector<T> eles; FG_vector(graph_engine::ptr graph) { eles.resize(graph->get_num_vertices()); } FG_vector(size_t size) { eles.resize(size); } public: typedef typename std::shared_ptr<FG_vector<T> > ptr; /** Smart pointer for object access */ /** * \brief Create a vector of the length the same as the number of vertices * in the graph. An object of this * class should be created using this or the `create(size_t size)` * method. * \param graph A shared pointer to a graph engine object. This is generally * the graph for which you are creating the vector. */ static ptr create(graph_engine::ptr graph) { return ptr(new FG_vector<T>(graph)); } /** * \brief Create a vector of the specified length. 
An object of this * class should be created using this or the `create(graph_engine::ptr graph)` * method. * \param size The length of the vector you desire. */ static ptr create(size_t size) { return ptr(new FG_vector<T>(size)); } /** * \brief Initialize the vector a single value as specified by parameter 1. * * \param v The initialization parameter for the vector data. * **parallel** */ void init(T v) { #pragma omp parallel for for (size_t i = 0; i < eles.size(); i++) eles[i] = v; } /** * \brief Equivalent to += operator. Element by element * addition of one `FG_vector` to another. * \param other An `FG_vector` smart pointer object. * */ void plus_eq(FG_vector<T>::ptr other) { assert(get_size() == other->get_size()); for (size_t i = 0; i < get_size(); i++) { eles[i] += other->get(i); } } /** * \brief Assign a value `num` many times to the vector. * \param num The number of elements to assign. * \param val The value a user wnats to assign to vector positions. */ void assign(size_t num, T val) { eles.assign(num, val); } /** * \brief Make a shallow copy of the vector. * \param other An `FG_vector` smart pointer. * **paralel** */ void shallow_copy(FG_vector<T>::ptr other) { assert(this->get_size() == other->get_size()); #pragma omp parallel for for (size_t i = 0; i < get_size(); i++) { this->eles[i] = other->eles[i]; } } template<class T1> void copy_to(T1 *arr, size_t size) { size_t num = std::min(size, eles.size()); for (size_t i = 0; i < num; i++) arr[i] = eles[i]; } /** * \brief Check for equality between two `FG_vector`s element by * element. * \param other An `FG_vector` smart pointer. 
*/ // TODO DM: Make parallel / smarter bool eq_all(FG_vector<T>::ptr other) { return std::equal(this->eles.begin(), this->eles.end(), other->eles.begin()); } void init_rand(long max = std::numeric_limits<T>::max(), unsigned int seed = 0) { if (seed > 0) srandom(seed); if (max >= std::numeric_limits<T>::max()) max = std::numeric_limits<T>::max(); #pragma omp parallel for for (size_t i = 0; i < eles.size(); i++) eles[i] = random() % max; } /** * \brief Populate an [STL set](http://www.cplusplus.com/reference/set/set/) * with the unique elements in the vector. All duplicates are ignored. * * \param set The *empty* STL set that will be populated with unique vector members. * */ void unique(std::set<T> &set) const { // TODO we need a parallel implementation. assert(set.empty()); // FIXME: `new` a shared/unique ptr & remove param for (T v : eles) { set.insert(v); } } /** * \brief Get the number of elements contained in the vector. * * \return The number of elements in the vector */ size_t get_size() const { return eles.size(); } /** * \brief Get a pointer to the memory array used internally by * the vector to store its owned elements. * \return A pointer the underlying data memory array. * */ T *get_data() { return eles.data(); } /** * \brief Const method to get a pointer to the memory array * used internally by the vector to store its owned elements. * \return A const pointer the underlying data memory array. * * */ const T*get_data() const { return eles.data(); } /** * \brief Compute the [dot product](http://en.wikipedia.org/wiki/Dot_product) * of two FG vectors. <br> * **parallel** * * \return A value of data type `T` value that is the dot product. 
*/ T dot_product(const FG_vector<T> &other) const { assert(this->get_size() == other.get_size()); T ret = 0; for (size_t i = 0; i < get_size(); i++) ret += get(i) * other.get(i); return ret; } /** * \brief Compute the * [L2 Norm](http://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) * (also know as Euclidean distance) of a vector. <br> * **parallel** * * \return An object of type `T` with the value of the L2 norm. */ T norm2() const { T ret = 0; for (size_t i = 0; i < get_size(); i++) ret += get(i) * get(i); return sqrt(ret); } /** * \brief Compute the * [L1 Norm](http://en.wikipedia.org/wiki/Norm_(mathematics)#Taxicab_norm_or_Manhattan_norm) * (also Taxicab norm) of an FG_vector. <br> * **parallel** * * \return An object of type `T` with the L1 norm. */ T norm1() const { T ret = 0; for (size_t i = 0; i < get_size(); i++) ret += fabs(get(i)); return ret; } /** * \brief Compute the sum of all elements in the vector. <br> * If the type is integer, the sum can overflow. * **parallel** * \return The sum of all items in the vector. */ T sum() const { return sum<T>(); } /** * \brief Compute the sum of all elements in the vector. <br> * This sum() allows users to specify the type of the result, so users * can avoid integer overflow. * **parallel** * \return The sum of all items in the vector. */ template<class ResType> ResType sum() const { struct identity_func { ResType operator()(T v) { return v; } }; return aggregate<identity_func, ResType>(identity_func()); } template<class Func, class ResType> ResType aggregate(Func func) const { ResType ret = 0; for (size_t i = 0; i < get_size(); i++) ret += func(eles[i]); return ret; } /** * \brief Find the maximal value in the vector and return its value. * \return The maximal value in the vector. */ T max() const { return max_val_loc().first; } /** * \brief Find the maximal value in the vector and return its value * and its location. * \return A pair that contains the maximal value and its location * in the vector. 
*/ std::pair<T, off_t> max_val_loc() const { T ret = std::numeric_limits<T>::min(); off_t idx = 0; for (size_t i = 0; i < get_size(); i++) { if (ret < get(i)) { ret = get(i); idx = i; } } return std::pair<T, off_t>(ret, idx); } /** * \brief Find the index with the minmimal value in the vector and * return its value. * \return The minimal value in the vector. */ T min() const { T ret = std::numeric_limits<T>::max(); for (size_t i = 0; i < get_size(); i++) ret = std::min(get(i), ret); return ret; } /** * \brief Find the index with the minimal value in the vector and * return *the index*. * \return The minimal index value in the vector. */ size_t argmin() { typename std::vector<T>::iterator res = std::min_element(eles.begin(), eles.end()); size_t ret = std::distance(eles.begin(), res); return ret; } /** * \brief Serial element-wise print of the vector. * **Not intended for very large vectors** */ void print(vsize_t max_print_size=100) { vsize_t print_len = get_size() > max_print_size ? max_print_size : get_size(); std::cout << "["; for (vsize_t i=0; i < print_len; i++) { std::cout << " " << get(i); } if (print_len == max_print_size && print_len != get_size()) { std::cout << "..."; } std::cout << " ]\n\n"; } /** * \brief Write the space separated vector to file. * \param fn The file name you wish written to file. */ void to_file(std::string fn) { std::ofstream f; f.open(fn); for (vsize_t i=0; i < get_size(); i++) { f << get(i) << " "; } f.close(); } void neg_in_place() { for (size_t i = 0; i < get_size(); i++) eles[i] = -eles[i]; } /** * \brief In place division of vector by a single value. * \param v The value by which you want the array divided. * **parallel** */ void div_by_in_place(T v) { #pragma omp parallel for for (size_t i = 0; i < get_size(); i++) eles[i] /= v; } /** * \brief element-wise merge with another vector and store the result * in this vector. * \param vec The vector that you want to merge with. 
* \param func The operator that you want to perform on each pair of * elements. */ template<class MergeFunc, class VecType> void merge_in_place(typename FG_vector<VecType>::ptr vec, MergeFunc func) { assert(this->get_size() == vec->get_size()); #pragma omp parallel for for (size_t i = 0; i < get_size(); i++) eles[i] = func(eles[i], vec->get(i)); } /** * \brief In place element-wise addition by another vector. * \param vec The vector by which you want to add to this vector. * **parallel** */ template<class T2> void add_in_place(typename FG_vector<T2>::ptr vec) { struct add_func { T operator()(const T &v1, const T2 &v2) { return v1 + v2; } }; merge_in_place<add_func, T2>(vec, add_func()); } /** * \brief In place subtraction of the vector by another vector. * \param vec The vector by which you want the array to be subtracted. * **parallel** */ template<class T2> void subtract_in_place(typename FG_vector<T2>::ptr &vec) { struct sub_func { T operator()(const T &v1, const T2 &v2) { return v1 - v2; } }; merge_in_place<sub_func, T2>(vec, sub_func()); } template<class T2> void multiply_in_place(T2 v) { for (size_t i = 0; i < get_size(); i++) eles[i] *= v; } template<class IN_TYPE, class OUT_TYPE> typename FG_vector<OUT_TYPE>::ptr multiply(IN_TYPE v) const { typename FG_vector<OUT_TYPE>::ptr ret = FG_vector<OUT_TYPE>::create(get_size()); for (size_t i = 0; i < get_size(); i++) ret->set(i, this->eles[i] * v); return ret; } template<class IN_TYPE, class OUT_TYPE> typename FG_vector<OUT_TYPE>::ptr multiply(typename FG_vector<IN_TYPE>::ptr vec) const { if (vec->get_size() != this->get_size()) return typename FG_vector<OUT_TYPE>::ptr(); typename FG_vector<OUT_TYPE>::ptr ret = FG_vector<OUT_TYPE>::create(get_size()); for (size_t i = 0; i < get_size(); i++) ret->eles[i] = this->eles[i] * vec->get(i); return ret; } /** * \brief Apply a function to every element in an FG_vector. * * \param func A user-defined function. 
* \param output The FG_vector that you want to apply the function to. * * **parallel** */ template<class ApplyFunc> void apply(ApplyFunc func, FG_vector<T> &output) { #pragma omp parallel for for (size_t i = 0; i < get_size(); i++) output.set(i, func(eles[i])); } // TODO these interfaces assume shared memory. /** * Set a value of an index in the vector. * * **NOTE:** This function assumes a shared memory environment. * \param id The index where value is being set. * \param v The value that the index will be set to. */ void set(vertex_id_t id, const T &v) { eles[id] = v; } /** * \brief Const get the value of a particular index. * \param id The index of the vector from where you want a value. * \return The value requested by param 1 * */ const T &get(vertex_id_t id) const { return eles[id]; } /** * \brief Non-const get the value of a particular index. * \param id The index of the vector from where you want a value. * \return The value requested by param 1 * */ T &get(vertex_id_t id) { return eles[id]; } }; /** * \brief Apply a user defined function to multipl FG_vectors. * **parallel** * \param inputs A vector of FG_vectors that are the inputs. * \param output A FG_vector that are the outputs. * \param apply The user-defined function that will be applied to all vecotors. */ template<class T, class ApplyFunc> void multi_vec_apply(const std::vector<typename FG_vector<T>::ptr> &inputs, typename FG_vector<T>::ptr output, ApplyFunc apply) { for (size_t i = 0; i < inputs.size(); i++) assert(output->get_size() == inputs[i]->get_size()); #pragma omp parallel for for (size_t i = 0; i < output->get_size(); i++) output->set(i, apply(i, inputs)); } } #endif
openmp-unsupported.c
// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core.builtin -fopenmp -verify %s // expected-no-diagnostics void openmp_parallel_crash_test(void) { #pragma omp parallel ; #pragma omp parallel for for (int i = 0; i < 8; ++i) for (int j = 0, k = 0; j < 8; ++j) ; }
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified (carry normalization) by this routine. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates the time-alternating grid A[2][Nz][Ny][Nx]
 * and 7 coefficient grids, runs the Pluto-tiled 7-point stencil TESTS
 * times, and reports the best wall-clock time.
 * Usage: prog Nx Ny Nz Nt
 * NOTE(review): Nx/Ny/Nz/Nt stay uninitialized when fewer than 4
 * arguments are given -- UB in that case; presumably the harness always
 * passes all four. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2; /* +2 adds the boundary halo on each axis */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  // NOTE(review): tile_size is only consumed by the source-to-source
  // transformation tooling; it is never read at runtime nor freed here.
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* The following loop nest was generated by Pluto/CLooG from the
     * 7-point stencil; the tile sizes (4x4x24x2048) match tile_size[]
     * above.  (A pasted glibc header comment that had no bearing on
     * this benchmark was removed here.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-1023,1024)),ceild(4*t2-Nz-2044,2048)),ceild(24*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t2+Nx,2048),floord(Nt+Nx-4,2048)),floord(2*t1+Nx+1,2048)),floord(24*t3+Nx+20,2048)),floord(4*t1-4*t2+Nz+Nx-1,2048));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),2048*t4+2046),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
multisort.h
#ifndef _MULTISORTH_ #define _MULTISORTH_ #include <omp.h> #include "merge.h" //for merge() and compare_function used in qsort() void multisort(int *array, int *space, int N) { int quarter = N/4; if(quarter<4) //if there are too few elements to work with threads, use quicksort { qsort(&array[0], N, sizeof(int), compare_function); } else { int *startA = array; int *spaceA = space; int *startB = startA + quarter; int *spaceB = spaceA + quarter; int *startC = startB + quarter; int *spaceC = spaceB + quarter; int *startD = startC + quarter; int *spaceD = spaceC + quarter; //split the input array to 4 subarrays #pragma omp task multisort(startA, spaceA, quarter); #pragma omp task multisort(startB, spaceB, quarter); #pragma omp task multisort(startC, spaceC, quarter); #pragma omp task multisort(startD, spaceD, n - 3 * quarter); #pragma omp taskwait //merge the 4 subarrays in couples #pragma omp task merge(&array[0], N/2, &space[0]); #pragma omp task merge(&array[N/2], N/2, &space[N/2]); #pragma omp taskwait //merge the couples together #pragma omp task merge(startA, startA + quarter - 1, startB, startB + quarter - 1, spaceA); #pragma omp task merge(startC, startC + quarter - 1, startD, array + n - 1, spaceC); #pragma omp taskwait merge(spaceA, spaceC - 1, spaceC, space + n - 1, array); } } #endif
GB_binop__isne_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): each worker below expands one of the GB_*template.c files via
// #include; the macros defined in this file specialize those templates for
// the ISNE operator on uint8_t: z = (x != y), with z also typed uint8_t.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__isne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__isne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__isne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isne_uint8)
// A*D function (colscale):         GB (_AxD__isne_uint8)
// D*A function (rowscale):         GB (_DxB__isne_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__isne_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__isne_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isne_uint8)
// C=scalar+B                       GB (_bind1st__isne_uint8)
// C=scalar+B'                      GB (_bind1st_tran__isne_uint8)
// C=A+scalar                       GB (_bind2nd__isne_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__isne_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern?  0
// B type:   uint8_t
// B pattern?  0

// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: the ISNE op returns 0 or 1 as a uint8_t
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_UINT8 || GxB_NO_ISNE_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISNE is not a valid accumulator here, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isne_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (generated code: this second return is unreachable)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isne_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isne_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for ISNE, so only this branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isne_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isne_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x != aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isne_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for the functions that follow
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij != y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_deserialize_from_blob.c
//------------------------------------------------------------------------------
// GB_deserialize_from_blob: uncompress a set of blocks from the blob
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Decompress a single array from a set of compressed blocks in the blob.  If
// the input data is mangled, this method is still safe, since it performs the
// bare minimum sanity checks to ensure no out-of-bounds indexing of arrays.
// However, the contents of output array are not fully checked.  This step is
// done by GB_deserialize, if requested.

#include "GB.h"
#include "GB_serialize.h"
#include "GB_lz4.h"

// free the output array X on any failure path
#define GB_FREE_ALL         \
{                           \
    GB_FREE (&X, X_size) ;  \
}

GrB_Info GB_deserialize_from_blob
(
    // output:
    GB_void **X_handle,         // uncompressed output array
    size_t *X_size_handle,      // size of X as allocated
    // input:
    int64_t X_len,              // size of X in bytes
    const GB_void *blob,        // serialized blob of size blob_size
    size_t blob_size,
    int64_t *Sblocks,           // array of size nblocks
    int32_t nblocks,            // # of compressed blocks for this array
    int32_t method,             // compression method used for each block
    // input/output:
    size_t *s_handle,           // location to write into the blob
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;     // not otherwise used in this function
    ASSERT (blob != NULL) ;
    ASSERT (s_handle != NULL) ;
    ASSERT (X_handle != NULL) ;
    ASSERT (X_size_handle != NULL) ;
    // clear the outputs so they are well-defined even on an error return
    (*X_handle) = NULL ;
    (*X_size_handle) = 0 ;

    //--------------------------------------------------------------------------
    // parse the method
    //--------------------------------------------------------------------------

    bool intel ;
    int32_t algo, level ;
    GB_serialize_method (&intel, &algo, &level, method) ;
    // method = (intel ? GxB_COMPRESSION_INTEL : 0) + (algo) + (level) ;

    //--------------------------------------------------------------------------
    // allocate the output array
    //--------------------------------------------------------------------------

    size_t X_size = 0 ;
    GB_void *X = GB_MALLOC (X_len, GB_void, &X_size) ;
    if (X == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // decompress the blocks from the blob
    //--------------------------------------------------------------------------

    size_t s = (*s_handle) ;    // current read offset into the blob
    bool ok = true ;

    if (algo == GxB_COMPRESSION_NONE)
    {

        //----------------------------------------------------------------------
        // no compression; the array is held in a single block
        //----------------------------------------------------------------------

        // bounds-check BEFORE the memcpy: all of blob [s .. s+X_len-1]
        // must lie inside the blob
        if (nblocks != 1 || Sblocks [0] != X_len || s + X_len > blob_size)
        {
            // blob is invalid: guard against an unsafe memcpy
            ok = false ;
        }
        else
        {
            // copy the blob into the array X.  This is now safe and secure.
            // The contents of X are not yet checked, however.
            GB_memcpy (X, blob + s, X_len, nthreads_max) ;
        }

    }
    else if (algo == GxB_COMPRESSION_LZ4 || algo == GxB_COMPRESSION_LZ4HC)
    {

        //----------------------------------------------------------------------
        // LZ4 / LZ4HC compression
        //----------------------------------------------------------------------

        int nthreads = GB_IMIN (nthreads_max, nblocks) ;
        int32_t blockid ;
        // each block decompresses independently; any failing block makes
        // the whole call fail via the && reduction on ok
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic) \
            reduction(&&:ok)
        for (blockid = 0 ; blockid < nblocks ; blockid++)
        {
            // get the start and end of the compressed and uncompressed blocks
            int64_t kstart, kend ;
            GB_PARTITION (kstart, kend, X_len, blockid, nblocks) ;
            int64_t s_start = (blockid == 0) ? 0 : Sblocks [blockid-1] ;
            int64_t s_end = Sblocks [blockid] ;
            size_t s_size = s_end - s_start ;
            size_t d_size = kend - kstart ;

            // ensure s_start, s_end, kstart, and kend are all valid,
            // to avoid accessing arrays out of bounds, if input is corrupted.
            if (kstart < 0 || kend < 0 || s_start < 0 || s_end < 0 ||
                kstart >= kend || s_start >= s_end || s_size > INT32_MAX ||
                s + s_start > blob_size || s + s_end > blob_size ||
                kstart > X_len || kend > X_len || d_size > INT32_MAX)
            {
                // blob is invalid
                ok = false ;
            }
            else
            {
                // uncompress the compressed block of size s_size
                // from blob [s + s_start:s_end-1] into X [kstart:kend-1].
                // This is safe and secure so far.  The contents of X are
                // not yet checked, however.  That step is done in
                // GB_deserialize, if requested.
                const char *src = (const char *) (blob + s + s_start) ;
                char *dst = (char *) (X + kstart) ;
                int src_size = (int) s_size ;
                int dst_size = (int) d_size ;
                // LZ4_decompress_safe returns the # of bytes written, which
                // must be exactly the expected uncompressed size
                int u = LZ4_decompress_safe (src, dst, src_size, dst_size) ;
                if (u != dst_size)
                {
                    // blob is invalid
                    ok = false ;
                }
            }
        }

    }
    else
    {
        // unknown compression method
        ok = false ;
    }

    if (!ok)
    {
        // decompression failure; blob is invalid
        GB_FREE_ALL ;
        return (GrB_INVALID_OBJECT) ;
    }

    //--------------------------------------------------------------------------
    // return result: X, its size, and updated index into the blob
    //--------------------------------------------------------------------------

    (*X_handle) = X ;
    (*X_size_handle) = X_size ;
    // advance the blob offset past all of this array's compressed blocks
    s += Sblocks [nblocks-1] ;
    (*s_handle) = s ;
    return (GrB_SUCCESS) ;
}
GB_unop__minv_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): specialization of the generic unary-op templates for the
// MINV (multiplicative inverse) operator on single-precision complex values.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__minv_fc32_fc32
// op(A') function:  GB_unop_tran__minv_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_FC32_minv (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_FC32_minv (x) ;

// casting (identity cast: A and C have the same type here)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = GB_FC32_minv (z) ;    \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__minv_fc32_fc32
(
    GxB_FC32_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: apply the op to all anz entries
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_FC32_minv (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_FC32_minv (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__minv_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
levelset_fluid_solver.h
/* ============================================================================== KratosPFEMApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: antonia $ // Date: $Date: 2009-01-14 16:24:38 $ // Revision: $Revision: 1.11 $ // // #if !defined(KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED) #define KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED #define SPLIT_OSS // #define SYMM_PRESS // System includes #include <string> #include <iostream> #include <algorithm> // #include <omp.h> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" //#include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "incompressible_fluid_application.h" namespace Kratos { template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver> class LevelSetFluidSolver { public: //name for the self defined structure typedef EdgesStructureType<TDim> CSR_Tuple; typedef std::vector<CSR_Tuple> EdgesVectorType; //name for row start and column index vectors typedef std::vector<unsigned int> IndicesVectorType; //defining matrix type for test calculations typedef std::vector< array_1d<double, TDim> > CalcVectorType; //defining type for local storage of nodal values typedef std::vector<double> ValuesVectorType; //defining types for matrix operations typedef typename TSparseSpace::MatrixType TSystemMatrixType; typedef typename TSparseSpace::VectorType TSystemVectorType; //constructor and destructor LevelSetFluidSolver(MatrixContainer& mr_matrix_container, ModelPart& mr_model_part, bool include_shock_capturing, bool smooth_convective_velocity ) : mr_matrix_container(mr_matrix_container),mr_model_part(mr_model_part) { //options minclude_shock_capturing = include_shock_capturing; msmooth_convective_velocity = smooth_convective_velocity; }; ~LevelSetFluidSolver() {}; //*********************************** //function to initialize fluid solver void Initialize( ) { KRATOS_TRY //get number of nodes unsigned 
int n_nodes = mr_model_part.Nodes().size(); unsigned int n_edges = mr_matrix_container.GetNumberEdges(); //size data vectors mWork.resize(n_nodes); mvel_n.resize(n_nodes); mvel_n1.resize(n_nodes); mInitMom.resize(n_nodes); mCurrMom.resize(n_nodes); mPn.resize(n_nodes); mPn1.resize(n_nodes); mViscosity.resize(n_nodes); mRho.resize(n_nodes); mRhoOld.resize(n_nodes); mC2inv.resize(n_nodes); mA.resize(n_nodes); mHmin.resize(n_nodes); mHavg.resize(n_nodes); mNodalFlag.resize(n_nodes); mdistances.resize(n_nodes); mEps.resize(n_nodes); mEpsOld.resize(n_nodes); mD.resize(n_nodes); mTauPressure.resize(n_nodes); mTauConvection.resize(n_nodes); mPi.resize(n_nodes); mXi.resize(n_nodes); mBodyForce.resize(n_nodes); mDrag.resize(n_nodes); mx.resize(n_nodes); mCp.resize(n_nodes); mMach.resize(n_nodes); mEdgeDimensions.resize(n_edges); mBeta.resize(n_edges); for (unsigned int csr_index = 0; csr_index < n_edges; csr_index++) mBeta[csr_index] = 1.0; ValuesVectorType external_pressure; external_pressure.resize(n_nodes); //read velocity and pressure data from Kratos mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(EXTERNAL_PRESSURE, external_pressure, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(IS_BOUNDARY, mNodalFlag, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes()); mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes()); mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes()); //mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes()); //set flag for first time step mFirstStep = true; //loop to categorize boundary nodes for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { //differentiate between types of boundary condition switch (static_cast<unsigned 
int>(mNodalFlag[i_node])) { case 1: //velocity inlet mVelocityInletList.push_back(i_node); mVelocityInlet.push_back(mvel_n[i_node]); mDensityInlet.push_back(mRho[i_node]); mDissipationList.push_back(i_node); break; case 2: //no-slip condition mNoSlipBoundaryList.push_back(i_node); break; case 3: //slip condition mSlipBoundaryList.push_back(i_node); break; case 4: //mixed condition (slip and pressure node) mPressureOutletList.push_back(i_node); mPressureOutlet.push_back(external_pressure[i_node]); mSlipBoundaryList.push_back(i_node); mDissipationList.push_back(i_node); break; case 5: //pressure outlet mPressureOutletList.push_back(i_node); mPressureOutlet.push_back(external_pressure[i_node]); mDissipationList.push_back(i_node); break; } } //print number of nodes corresponding to the different types of boundary conditions KRATOS_WATCH(mVelocityInletList.size()) KRATOS_WATCH(mDensityInlet.size()) KRATOS_WATCH(mPressureOutletList.size()) KRATOS_WATCH(mSlipBoundaryList.size()) KRATOS_WATCH(mNoSlipBoundaryList.size()) KRATOS_WATCH(mDissipationList.size()) //determine number of edges and entries unsigned int n_nonzero_entries = 2 * n_edges + n_nodes; //allocate memory for variables mL.resize(n_nodes,n_nodes,n_nonzero_entries); //loop over all nodes for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { //flag for considering diagonal matrix elements bool flag = 0; //loop over all neighbours for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; //define matrix structure row by row (the order does matter!) 
if ((j_neighbour > i_node) && (flag == 0)) { //add diagonal/nodal contribution mL.push_back(i_node, i_node, 0.0); flag = 1; } //add non-diagonal/edge contribution mL.push_back(i_node, j_neighbour, 0.0); } //if diagonal element is the last non-zero element of the row if (flag == 0) mL.push_back(i_node, i_node, 0.0); } //compute area normals CalculateNormals(mr_model_part.Conditions()); // WriteVectorToDatabase(NORMAL, mPressureNormal, mr_model_part.Nodes()); mr_matrix_container.WriteVectorToDatabase(NORMAL, mSlipNormal, mr_model_part.Nodes()); //compute minimum length of the surrounding edges CalculateEdgeLengths(mr_model_part.Nodes()); //prepare initial momentum for first time step for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { double& rho_i = mRho[i_node]; array_1d<double, TDim>& u_i = mvel_n1[i_node]; array_1d<double, TDim>& U_i = mInitMom[i_node]; //compute initial momentum for iteration of step 1 for (unsigned int component = 0; component < TDim; component++) U_i[component] = rho_i * u_i[component]; } KRATOS_CATCH("") } //*************************************** //function to set adequate time step size void ComputeTimeStep(double CFLNumber) { KRATOS_TRY //local variable for time step size double delta_t = 1e10; //getting value of current velocity and of viscosity mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, mr_model_part.Nodes()); mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes()); //******************* //loop over all nodes double n_nodes = mvel_n1.size(); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& v_i = mvel_n1[i_node]; // KRATOS_WATCH(v_i); array_1d<double, TDim>& x_i = mx[i_node]; // KRATOS_WATCH(x_i); //use CFL condition to compute time step size double delta_t_i = CFLNumber * 1.0 / (norm_2(v_i)/mHmin[i_node] + 2.0 * mViscosity[i_node]/(mHmin[i_node]*mHmin[i_node]) ); 
//considering the most restrictive case of neighbor's velocities with similar direction but opposite sense. //loop over all neighbours for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; array_1d<double, TDim>& v_j = mvel_n1[j_neighbour]; array_1d<double, TDim>& x_j = mx[j_neighbour]; array_1d<double, TDim> edge_dir = ZeroVector(TDim); // KRATOS_WATCH(x_j); // KRATOS_WATCH(v_j); //Calculate edge direction edge_dir[0] = x_j[0]-x_i[0]; edge_dir[1] = x_j[1]-x_i[1]; edge_dir[2] = x_j[2]-x_i[2]; // KRATOS_WATCH(edge_dir); double aux = norm_2(edge_dir); // KRATOS_WATCH(aux); if (aux == 0.0) { edge_dir = ZeroVector(TDim); } else { //normalized edge direction edge_dir /= norm_2(edge_dir); // KRATOS_WATCH(edge_dir); } //int aux = inner_prod(v_i,v_j); double v_i_par = inner_prod(v_i, edge_dir); double v_j_par = inner_prod(v_j, edge_dir); // KRATOS_WATCH(v_i_par); // KRATOS_WATCH(v_j_par); if ((v_i_par >= 0.0 && v_j_par <= 0.0) || (v_i_par <= 0.0 && v_j_par >= 0.0)) { double delta_t_j = CFLNumber / ((fabs(v_i_par) + fabs(v_j_par))/mHmin[i_node] + 2.0 * mViscosity[i_node]/(mHmin[i_node]*mHmin[i_node])); // KRATOS_WATCH(delta_t_j); // KRATOS_WATCH(delta_t_i); if (delta_t_j < delta_t_i) delta_t_i = delta_t_j; } } //choose the overall minimum of delta_t_i if (delta_t_i < delta_t) delta_t = delta_t_i; } //******************* //perform MPI syncronization of the dt (minimum should be kept) //write time step size to Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); CurrentProcessInfo[DELTA_TIME] = delta_t; KRATOS_CATCH("") } //********************************************************************************** //function to solve fluid equations - fractional step 1: compute fractional momentum Vector SolveStep1() { KRATOS_TRY //PREREQUISITES 
//variables for node based data handling ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //storage of nodal values in local variables CalcVectorType rhs; rhs.resize(n_nodes); //read velocity and pressure data from Kratos mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes); mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes); mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes); mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes); mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, rNodes); mr_matrix_container.FillOldScalarFromDatabase(DENSITY, mRhoOld, rNodes); mr_matrix_container.FillVectorFromDatabase(BODY_FORCE, mBodyForce, rNodes); mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, rNodes); mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, rNodes); mr_matrix_container.FillOldScalarFromDatabase(POROSITY, mEpsOld, rNodes); //read time step size from Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; #pragma omp parallel for for ( int i_node = 0; i_node < n_nodes; i_node++) { // -> mCurrMom //compute the momentum at the current step -> mCurrMom double& rho_i = mRho[i_node]; const array_1d<double, TDim>& u_i = mvel_n1[i_node]; array_1d<double, TDim>& U_i = mCurrMom[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) U_i[comp] = rho_i * u_i[comp]; // -> mInitMom double& rho_i_old = mRhoOld[i_node]; //compute the momentum at the beginning of the step const array_1d<double, TDim>& u_i_old = mvel_n[i_node]; array_1d<double, TDim>& U_i_old = mInitMom[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) U_i_old[comp] = rho_i_old * u_i_old[comp]; //compute volumetric body force array_1d<double, TDim>& f_i = mBodyForce[i_node]; for (unsigned int comp = 0; comp < 
TDim; comp++) f_i[comp] *= rho_i; } DivideByPorosity(mCurrMom, mCurrMom, mEps); DivideByPorosity(mInitMom, mInitMom, mEpsOld); DivideByPorosity(mvel_n, mvel_n, mEpsOld); DivideByPorosity(mvel_n1, mvel_n1, mEps); //compute advective velocity - area average of the current velocity CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity); //compute intrinsic time double time_inv = 1.0/delta_t; // time_inv = 0.0; #pragma omp parallel for firstprivate(time_inv) for (int i_node = 0; i_node < n_nodes; i_node++) { // double& h_i = mHavg[i_node]; double& h_i = mHmin[i_node]; array_1d<double, TDim>& a_i = mA[i_node]; const double nu_i = mViscosity[i_node]; // mTau[i_node] = 1.0 / (0.5 * norm_2(a_i)/h_i + time_inv); double vel_norm = norm_2(a_i); // mTauPressure[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) ); mTauPressure[i_node] = delta_t; mTauConvection[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) ); if (mTauPressure[i_node] < delta_t) mTauPressure[i_node] = delta_t; else if(mTauPressure[i_node] > 100.0*delta_t) mTauPressure[i_node] = 100.0*delta_t; } //compute pressure switch if (mFirstStep == false) if(minclude_shock_capturing == true) ComputeMonotonicityPreserving(); mr_matrix_container.AssignVectorToVector(mInitMom,mWork); //mWork = mvel_n NO!!!-> mWork = mU_iold //first step of Runge Kutta mr_matrix_container.AssignVectorToVector(mvel_n,mvel_n1); //mvel_n1 = mvel_n mr_matrix_container.AssignVectorToVector(mInitMom,mCurrMom); // double start_prod = omp_get_wtime(); CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity); mr_matrix_container.SetToZero(rhs); CalculateRHS( mCurrMom, mPn1, mA, mBodyForce, mViscosity, rhs); /*double norma=0.0; for (int i_node = 0; i_node < n_nodes; i_node++) for (int kkk = 0; kkk < TDim; kkk++) norma += rhs[i_node][kkk]*rhs[i_node][kkk]; KRATOS_WATCH(norma);*/ mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs); 
mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, 0.5*delta_t , mr_matrix_container.GetInvertedMass(), rhs); ApplyVelocityBC(mCurrMom); /*mr_matrix_container.WriteVectorToDatabase(CONV_PROJ, mA, rNodes); mr_matrix_container.WriteScalarToDatabase(TEMPERATURE, mTauConvection, rNodes);*/ //second step CalculateVelocity(mvel_n1,mCurrMom,mRho); CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity); mr_matrix_container.SetToZero(rhs); CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs ); mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs); mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, 0.5*delta_t , mr_matrix_container.GetInvertedMass(),rhs); ApplyVelocityBC(mCurrMom); //third step CalculateVelocity(mvel_n1,mCurrMom,mRho); CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity); mr_matrix_container.SetToZero(rhs); CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs); mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs); mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, delta_t , mr_matrix_container.GetInvertedMass(), rhs); ApplyVelocityBC(mCurrMom); //fourth step CalculateVelocity(mvel_n1,mCurrMom,mRho); CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity); mr_matrix_container.SetToZero(rhs); CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs ); mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs); ApplyVelocityBC(mCurrMom); //compute right-hand side mr_matrix_container.AssignVectorToVector(mWork,mCurrMom); ApplyVelocityBC(mCurrMom); // //compute ratio for iteration Vector stop_criteria(TDim); noalias(stop_criteria) = ZeroVector(TDim); // stop_criteria[0] = 0.0; // stop_criteria[1] = 0.0; return stop_criteria; KRATOS_CATCH("") } //********************************************************************* //function to calculate 
right-hand side of fractional momentum equation void CalculateRHS( const CalcVectorType& momentum, const ValuesVectorType& pressure, const CalcVectorType& convective_velocity, const CalcVectorType& body_force, const ValuesVectorType& viscosity, CalcVectorType& rhs) { KRATOS_TRY int n_nodes = momentum.size(); //calculating the convective projection #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& pi_i = mPi[i_node]; //****************** //setting to zero for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] = 0.0; const array_1d<double, TDim>& a_i = convective_velocity[i_node]; const array_1d<double, TDim>& U_i = momentum[i_node]; //const double& p_i = pressure[i_node]; for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour]; const array_1d<double, TDim>& U_j = momentum[j_neighbour]; //const double& p_j = pressure[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index]; edge_ij.Add_ConvectiveContribution(pi_i,a_i,U_i,a_j,U_j); // edge_ij.Add_grad_p(pi_i,p_i,p_j); // edge_ij.Sub_grad_p(pi_i,p_i,p_j); } const double m_inv = mr_matrix_container.GetInvertedMass()[i_node]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] *= m_inv; } //perform MPI syncronization //calculating the RHS array_1d<double,TDim> stab_low; array_1d<double,TDim> stab_high; #pragma omp parallel for private(stab_low,stab_high) for ( int i_node = 0; i_node < n_nodes; i_node++) { double dist = mdistances[i_node]; if (dist < 0.0) //node is inside domain ---- if outside do nothing { array_1d<double, TDim>& rhs_i = rhs[i_node]; const array_1d<double, TDim>& f_i = body_force[i_node]; const array_1d<double, TDim>& a_i = convective_velocity[i_node]; const 
array_1d<double, TDim>& U_i = momentum[i_node]; const array_1d<double, TDim>& pi_i = mPi[i_node]; const double& p_i = pressure[i_node]; const double& nu_i = viscosity[i_node]; //double& h_i = mHmin[i_node]; //initializing with the external forces (e.g. gravity) double& m_i = mr_matrix_container.GetLumpedMass()[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) rhs_i[comp] = m_i * f_i[comp]; //porous contribution double eps = mEps[i_node]; double d = mD[i_node]; //diameter of the particle double kinv = 150.0*(1.0-eps)*(1.0-eps)/(eps*eps*eps*d*d); double norm_u_2 = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) norm_u_2 = a_i[comp]*a_i[comp]; // norm_u_2 = U_i[comp]*U_i[comp]; //CORRECTED Term double nonlin_term = kinv * nu_i * eps + 1.75 * sqrt(norm_u_2 * kinv / (eps * 150.0)); //ERROR IN WRITING THE NON LINEAR TERM// // double nonlin_term = kinv * nu_i * eps + 1.75 * norm_u_2 * sqrt(kinv / ( eps * 150.0)); for (unsigned int comp = 0; comp < TDim; comp++) rhs_i[comp] -= m_i * nonlin_term * U_i[comp]; //convective term for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour]; const array_1d<double, TDim>& U_j = momentum[j_neighbour]; const array_1d<double, TDim>& pi_j = mPi[j_neighbour]; const double& p_j = pressure[j_neighbour]; const double& nu_j = viscosity[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index]; edge_ij.Sub_ConvectiveContribution(rhs_i,a_i,U_i,a_j,U_j); //take care! we miss including a B.C. 
for the external pressure edge_ij.Add_Gp(rhs_i,p_i,p_j); edge_ij.Sub_ViscousContribution(rhs_i,U_i,nu_i,U_j,nu_j); //add stabilization // edge_ij.CalculateConvectionStabilization_LOW( stab_low,a_i,U_i,p_i,a_j,U_j,p_j); edge_ij.CalculateConvectionStabilization_LOW( stab_low,a_i,U_i,a_j,U_j); double edge_tau = mTauConvection[i_node]; edge_ij.CalculateConvectionStabilization_HIGH( stab_high,a_i,pi_i,a_j,pi_j); double beta = mBeta[csr_index]; edge_ij.Sub_StabContribution( rhs_i, edge_tau, beta, stab_low, stab_high); } } } //boundary integrals --> finishing the calculation of the pressure gradient int loop_size1 = mPressureOutletList.size(); #pragma omp parallel for for (int i_pressure = 0; i_pressure < loop_size1; i_pressure++) { unsigned int i_node = mPressureOutletList[i_pressure]; array_1d<double, TDim>& rhs_i = rhs[i_node]; const double& p_ext_i = mPressureOutlet[i_pressure]; const array_1d<double, TDim>& an_i = mPressureNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) rhs_i[comp] -= an_i[comp] * p_ext_i; // const array_1d<double, TDim>& U_i = momentum[i_node]; // const array_1d<double, TDim>& a_i = convective_velocity[i_node]; // double temp = 0.0; // double scalar_prod = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // { // scalar_prod += an_i[comp] * U_i[comp]; // temp += an_i[comp] * an_i[comp]; // } // temp = sqrt(temp); // for (unsigned int comp = 0; comp < TDim; comp++) // // rhs_i[comp] -= U_i[comp] * temp; // // rhs_i[comp] -= an_i[comp] * scalar_prod / temp; // rhs_i[comp] -= a_i[comp] * scalar_prod / temp; } KRATOS_CATCH("") } //************************************************************************* //function to solve fluid equations - fractional step 2: calculate pressure void SolveStep2(typename TLinearSolver::Pointer pLinearSolver) { KRATOS_TRY //PREREQUISITES //allocate memory for variables ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //unknown and right-hand side vector 
TSystemVectorType dp, rhs; dp.resize(n_nodes); rhs.resize(n_nodes); array_1d<double, TDim> dU_i, dU_j, work_array; //read time step size from Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; #ifdef _OPENMP double time_inv = 0.0; //1.0/delta_t; #endif #ifdef SPLIT_OSS // #pragma omp parallel for firstprivate(time_inv), private(work_array) for (int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& xi_i = mXi[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) xi_i[comp] = 0.0; const double& p_i = mPn1[i_node]; for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; const double& p_j = mPn1[j_neighbour]; //projection of pressure gradients CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index]; edge_ij.Add_grad_p(xi_i,p_i,p_j); // // // edge_ij.Sub_grad_p(xi_i,p_i,p_j); } const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) xi_i[l_comp] *= m_inv; } #endif //loop over all nodes #pragma omp parallel for firstprivate(time_inv) for (int i_node = 0; i_node < n_nodes; i_node++) { double& rhs_i = rhs[i_node]; rhs_i = 0.0; double& p_i = mPn1[i_node]; double& eps_i = mEps[i_node]; array_1d<double, TDim>& U_i_curr = mCurrMom[i_node]; //array_1d<double, TDim>& a_i = mA[i_node]; double& rho_i = mRho[i_node]; #ifdef SPLIT_OSS array_1d<double, TDim>& xi_i = mXi[i_node]; #else array_1d<double, TDim>& pi_i = mPi[i_node]; #endif //const double& h_i = mHavg[i_node]; double l_ii = 0.0; //loop over all neighbours for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { unsigned int j_neighbour = 
mr_matrix_container.GetColumnIndex()[csr_index]; double& p_j = mPn1[j_neighbour]; double& eps_j = mEps[j_neighbour]; array_1d<double, TDim>& U_j_curr = mCurrMom[j_neighbour]; //array_1d<double, TDim>& a_j = mA[j_neighbour]; #ifdef SPLIT_OSS array_1d<double, TDim>& xi_j = mXi[j_neighbour]; #else array_1d<double, TDim>& pi_j = mPi[j_neighbour]; #endif //const double& h_j = mHavg[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index]; #ifdef SYMM_PRESS double edge_tau = 0.5*( mTauPressure[i_node] + mTauPressure[j_neighbour]); #else double edge_tau = mTauPressure[i_node]; #endif // double edge_tau = CalculateEdgeTau(time_inv,h_i,a_i,h_j,a_j); // //compute laplacian operator double sum_l_ikjk; edge_ij.CalculateScalarLaplacian(sum_l_ikjk); double sum_l_ikjk_onlystab = sum_l_ikjk * (edge_tau); sum_l_ikjk *= (delta_t + edge_tau); //assemble right-hand side //pressure contribution rhs_i -= sum_l_ikjk_onlystab * (p_j - p_i); //other part of the residual #if !defined(SPLIT_OSS) array_1d<double, TDim>& a_j = mA[j_neighbour]; boost::numeric::ublas::bounded_matrix<double,TDim,TDim>& L = edge_ij.LaplacianIJ; for(unsigned int i = 0; i<TDim; i++) for(unsigned int j = 0; j<TDim; j++) rhs_i -= edge_tau * a_i[j] * L(i,j) * (U_j_curr[j] - U_i_curr[j]); #endif //calculating the divergence of the fract vel edge_ij.Sub_D_v(rhs_i,U_i_curr * eps_i,U_j_curr * eps_j); // edge_ij.Sub_D_v(rhs_i,a_i*rho_i,a_j*rho_i); //high order stabilizing term double temp = 0.0; #ifdef SPLIT_OSS // edge_ij.Add_div_v(temp,mTauPressure[i_node]*xi_i,mTauPressure[j_neighbour]*xi_j); edge_ij.Add_div_v(temp,xi_i,xi_j); #else edge_ij.Add_div_v(temp,pi_i,pi_j); #endif temp *= mBeta[csr_index]; rhs_i += edge_tau * temp; // rhs_i += temp; //assemble laplacian matrix mL(i_node, j_neighbour) = sum_l_ikjk; l_ii -= sum_l_ikjk; } mL(i_node, i_node) = l_ii; //add density variation contribution const double& rho_i_old = mRhoOld[i_node]; const double& m_i = 
mr_matrix_container.GetLumpedMass()[i_node]; rhs_i -= m_i * (rho_i - rho_i_old)/delta_t; //add mass contribution for compressible flows /* double& m_i = mr_matrix_container.GetLumpedMass()[i_node]; mL(i_node, i_node) += mC2inv[i_node] * m_i / delta_t;*/ } //find the max diagonal term double max_diag = 0.0; for (int i_node = 0; i_node < n_nodes; i_node++) { double L_diag = mL(i_node, i_node); if(fabs(L_diag) > fabs(max_diag)) max_diag = L_diag; } //respect pressure boundary conditions by penalization double huge = max_diag * 1e30; for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) { unsigned int i_node = mPressureOutletList[i_pressure]; mL(i_node, i_node) = huge; rhs[i_node] = 0.0; } //modification for level_set mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes()); //selecting nodes for fixing pressure // std::vector< unsigned int > aux(mdistances.size()); // for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++) // aux[i_dist] = 0; // for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++) // { // if(mdistances[i_dist] > 0) // { // aux[i_dist] = 1; // /* for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_dist]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_dist+1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; // aux[j_neighbour] = 1; // // }*/ // } // // } for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++) { // if(aux[i_dist] != 0) if(mdistances[i_dist] > 0) { // mPn1[i_dist] = 0.0; mL(i_dist, i_dist) = huge; rhs[i_dist] = 0.0; } } //set starting vector for iterative solvers for (int i_node = 0; i_node < n_nodes; i_node++) dp[i_node] = 0.0; //solve linear equation system L dp = rhs pLinearSolver->Solve(mL,dp,rhs); KRATOS_WATCH(*pLinearSolver) //update pressure for (int i_node = 0; i_node < n_nodes; i_node++) mPn1[i_node] += dp[i_node]; for (unsigned int i_pressure = 0; 
i_pressure < mPressureOutletList.size(); i_pressure++) { unsigned int i_node = mPressureOutletList[i_pressure]; mPn1[i_node] = mPressureOutlet[i_pressure]; } //calculate density variation from pressure variation // for (unsigned int i_node = 0; i_node < n_nodes; i_node++) // mRho[i_node] = mRhoOld[i_node] + dp[i_node] * mC2inv[i_node]; // for (unsigned int i_density = 0; i_density < mDensityInlet.size(); i_density++) // { // unsigned int i_node = mVelocityInletList[i_density]; // mRho[i_node] = mDensityInlet[i_density]; // } //write pressure and density to Kratos mr_matrix_container.WriteScalarToDatabase(PRESSURE, mPn1, rNodes); // mr_matrix_container.WriteScalarToDatabase(DENSITY, mRho, rNodes); KRATOS_CATCH("") } //********************************************************************************** //function to solve fluid equations - fractional step 3: correct fractional momentum void SolveStep3() { KRATOS_TRY //get number of nodes ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //CANCELLAAAAAAA è necessario??! Non lo sto riempendo con nulla....e ad ogni passo di tempo è nuovo.... 
mr_matrix_container.FillVectorFromDatabase(SEEPAGE_DRAG, mDrag, rNodes); //CORRECT FRACTIONAL MOMENTUM //define work array array_1d<double, TDim> correction; //read time step size from Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; //compute end of step momentum #pragma omp parallel for private(correction) firstprivate(delta_t) for (int i_node = 0; i_node < n_nodes; i_node++) { double dist = mdistances[i_node]; if (dist < 0.0) //node is inside domain ---- if outside do nothing { array_1d<double, TDim>& U_i_curr = mCurrMom[i_node]; double delta_p_i = mPn1[i_node] - mPn[i_node]; const double m_inv = mr_matrix_container.GetInvertedMass()[i_node]; //setting to zero for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) correction[l_comp] = 0.0; //compute edge contributions dt*M^(-1)Gp for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; double delta_p_j = mPn1[j_neighbour] - mPn[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index]; edge_ij.Add_Gp(correction,delta_p_i,delta_p_j); // edge_ij.Sub_Gp(correction,delta_p_i,delta_p_j); } //compute prefactor double coefficient = delta_t * m_inv; //correct fractional momentum for (unsigned int comp = 0; comp < TDim; comp++) U_i_curr[comp] += coefficient * correction[comp]; } } ApplyVelocityBC(mCurrMom); CalculateVelocity(mvel_n1,mCurrMom,mRho); MultiplyByPorosity(mvel_n1, mvel_n1, mEps); //write velocity of time step n+1 to Kratos mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes); CalculateDrag(mA, mCurrMom, mDrag, mViscosity); //CALCULATE THE DRAG MATRIX TO PASS TO THE SOLID PART mr_matrix_container.WriteVectorToDatabase(SEEPAGE_DRAG, mDrag, rNodes); KRATOS_CATCH("") } //************************************ //function to calculate 
speed of sound void SolveStep4(ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //get number of nodes int n_nodes = mC2inv.size(); //compute speed of sound using equation of state #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { double& rho_i = mRho[i_node]; double p_i_abs = mPn1[i_node]; mC2inv[i_node] = rho_i / (mGamma * p_i_abs); } KRATOS_CATCH("") } //************************************ void ApplyVelocityBC(CalcVectorType& MomentumArray) { KRATOS_TRY //velocity inlet int inlet_size = mVelocityInletList.size(); #pragma omp parallel for schedule(static) for (int i_velocity = 0; i_velocity < inlet_size; i_velocity++) { unsigned int i_node = mVelocityInletList[i_velocity]; array_1d<double, TDim>& u_i = mVelocityInlet[i_velocity]; double& rho_i = mDensityInlet[i_velocity]; array_1d<double, TDim>& U_i = MomentumArray[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) U_i[comp] = rho_i * u_i[comp]; } //slip condition int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int i_node = mSlipBoundaryList[i_slip]; array_1d<double, TDim>& U_i = MomentumArray[i_node]; array_1d<double, TDim>& an_i = mSlipNormal[i_node]; double projection_length = 0.0; double normalization = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { projection_length += U_i[comp] * an_i[comp]; normalization += an_i[comp] * an_i[comp]; } projection_length /= normalization; //tangential momentum as difference between original and normal momentum for (unsigned int comp = 0; comp < TDim; comp++) U_i[comp] -= projection_length * an_i[comp]; } //no-slip condition int no_slip_size = mNoSlipBoundaryList.size(); #pragma omp parallel for for (int i_noslip = 0; i_noslip < no_slip_size; i_noslip++) { unsigned int i_node = mNoSlipBoundaryList[i_noslip]; array_1d<double, TDim>& U_i = MomentumArray[i_node]; noalias(U_i) = ZeroVector(TDim); } KRATOS_CATCH("") } //******************************** 
//function to compute coefficients void ExtrapolateVelocities(unsigned int extrapolation_layers) { KRATOS_TRY typedef Node<3> PointType; typedef PointerVector<PointType > PointVector; typedef PointVector::iterator PointIterator; //reset is visited flag for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { inode->GetValue(IS_VISITED) = 0; } //generate a container with the layers to be extrapolated std::vector< PointVector > layers(extrapolation_layers); //detect the nodes inside the fluid surface for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { if( inode->FastGetSolutionStepValue(DISTANCE) <= 0.0) //candidates are only the ones inside the fluid domain { WeakPointerVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES); for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { if(i->FastGetSolutionStepValue(DISTANCE) > 0) //add the node as free surface if one of its neighb is outside { if( inode->GetValue(IS_VISITED) == 0) { layers[0].push_back( *(inode.base() ) ); inode->GetValue(IS_VISITED) = 1; } } } } } // //reset is visited flag // for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); // inode != mr_model_part.NodesEnd(); // inode++) // { // inode->GetValue(IS_VISITED) = 0; // } //fill the following layers by neighbour relationships //each layer fills the following for(unsigned int il = 0; il<extrapolation_layers-1; il++) { for( PointIterator iii=(layers[il]).begin(); iii!=(layers[il]).end(); iii++) { WeakPointerVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES); for(WeakPointerVector< Node<3> >::iterator jjj=neighb_nodes.begin(); jjj !=neighb_nodes.end(); jjj++) //destination = origin1 + value * Minv*origin { if( jjj->FastGetSolutionStepValue(DISTANCE) > 0 && jjj->GetValue(IS_VISITED) == 0.0 ) { 
layers[il+1].push_back( Node<3>::Pointer( *(jjj.base() ) ) ); jjj->GetValue(IS_VISITED) = double(il+2.0); } } } } //perform extrapolation layer by layer by making an average //of the neighbours of lower order array_1d<double,3> aux; for(unsigned int il = 1; il<extrapolation_layers; il++) { for( PointIterator iii=layers[il].begin(); iii!=layers[il].end(); iii++) { // noalias(aux) = ZeroVector(3); // double dist_min = 10000000000.0; // // array_1d<double,3>& coords_I = iii->Coordinates(); // // WeakPointerVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES); // for(WeakPointerVector< Node<3> >::iterator j=neighb_nodes.begin(); j !=neighb_nodes.end(); j++) // { // if(j->GetValue(IS_VISITED) < il+1) //if it is on the next layer // { // array_1d<double,3>& coords_J = j->Coordinates(); // // double dist = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // dist += pow(coords_I[comp]-coords_J[comp],2); // // if(dist < dist_min) // { // dist_min = dist; // noalias( iii->FastGetSolutionStepValue(VELOCITY) ) = j->FastGetSolutionStepValue(VELOCITY); // } // // } // } //extrapolate the average velocity noalias(aux) = ZeroVector(3); double avg_number = 0.0; WeakPointerVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES); for(WeakPointerVector< Node<3> >::iterator i=neighb_nodes.begin(); i !=neighb_nodes.end(); i++) { if(i->GetValue(IS_VISITED) < il+1 && i->GetValue(IS_VISITED)) { noalias(aux) += i->FastGetSolutionStepValue(VELOCITY); avg_number += 1.0; } } if(avg_number != 0.0) aux /= avg_number; noalias( iii->FastGetSolutionStepValue(VELOCITY) ) = aux; // noalias( iii->FastGetSolutionStepValue(VELOCITY,1) ) = aux; } } // mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes()); // // unsigned int n_nodes = mPn1.size(); // // //pressure coefficient // // #pragma omp parallel for // for (int i_node = 0; i_node < n_nodes; 
i_node++) // { // const double dist_i = mdistances[i_node]; // // // if( dist_i > 0.0) // { // double nn = 0.0; // // array_1d<double, TDim>& vel_i = mvel_n1[i_node]; // // for (unsigned int comp = 0; comp < TDim; comp++) // vel_i[comp] = 0.0; // // //compute edge contributions dt*M^(-1)Gp // for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; // // const double dist_j = mdistances[j_neighbour]; // // if(dist_j <= 0.0) // { // const array_1d<double, TDim>& vel_j = mvel_n1[j_neighbour]; // // for (unsigned int comp = 0; comp < TDim; comp++) // vel_i[comp] += vel_j[comp]; // // nn += 1.0; // // } // } // // if(nn> 1e-6) //it should be either 0 1 .. N // { // // std::cout << "inode= " << i_node << "nn = " << nn << std::endl; // // double inv_nn = 1.0/nn; // for (unsigned int comp = 0; comp < TDim; comp++) // vel_i[comp] *= inv_nn; // KRATOS_WATCH(vel_i); // } // // } // } // // // // // ApplyVelocityBC(mCurrMom); // // // //write velocity of time step n+1 to Kratos // mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes()); KRATOS_CATCH("") } void ChangeSignToDistance() { KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { double dist = inode->FastGetSolutionStepValue(DISTANCE); inode->FastGetSolutionStepValue(DISTANCE) = -dist; } KRATOS_CATCH("") } void MarkNodesByDistance(double min, double max ) { KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { double dist = inode->FastGetSolutionStepValue(DISTANCE); if(dist > min && dist < max) inode->GetValue(IS_VISITED) = 1; else inode->GetValue(IS_VISITED) = 0; } KRATOS_CATCH("") } void SaveScalarVariableToOldStep(Variable<double>& rVar) { 
KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { inode->FastGetSolutionStepValue(rVar,1) = inode->FastGetSolutionStepValue(rVar); } KRATOS_CATCH("") } void MarkExternalAndMixedNodes( ) { KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { inode->GetValue(IS_VISITED) = 0; } //detect the nodes inside the fluid surface for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { if( inode->FastGetSolutionStepValue(DISTANCE) > 0.0) //candidates are only the ones inside the fluid domain { inode->GetValue(IS_VISITED) = 1; WeakPointerVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES); for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { i->GetValue(IS_VISITED) = 1; } } } KRATOS_CATCH("") } void MarkInternalAndMixedNodes( ) { KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { inode->GetValue(IS_VISITED) = 0; } //detect the nodes inside the fluid surface for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { if( inode->FastGetSolutionStepValue(DISTANCE) <= 0.0) //candidates are only the ones inside the fluid domain { inode->GetValue(IS_VISITED) = 1; WeakPointerVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES); for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { i->GetValue(IS_VISITED) = 1; } } } KRATOS_CATCH("") } void CalculateVariablesDistribution(double rho_dense, double rho_light, double nu_dense, double nu_light, double eps, const array_1d<double,3>& body_force) { KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = 
mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { double dist = inode->FastGetSolutionStepValue(DISTANCE); //calculated smoothed density and viscosity distribution double H; if(dist < -eps) H = 0.0; else if(dist > eps) H = 1.0; else H = (dist+eps)/(2.0*eps) + sin(3.141592*dist/eps)/(2.0*3.141592); double rho_node = rho_dense + (rho_light-rho_dense)*H; inode->FastGetSolutionStepValue(DENSITY) = rho_node; double nu_node = nu_dense + (nu_light-nu_dense)*H; inode->FastGetSolutionStepValue(VISCOSITY) = nu_node; //reset variables outside of the fluid domain if( dist < 0 ) noalias(inode->FastGetSolutionStepValue(BODY_FORCE)) = body_force; else { inode->FastGetSolutionStepValue(PRESSURE) = 0.0; noalias(inode->FastGetSolutionStepValue(BODY_FORCE)) = body_force; noalias(inode->FastGetSolutionStepValue(VELOCITY)) = ZeroVector(3); noalias(inode->FastGetSolutionStepValue(VELOCITY,1)) = ZeroVector(3); } } KRATOS_CATCH("") } //******************************** //function to compute coefficients void CalculateCoefficients(ModelPart::NodesContainerType& rNodes) { KRATOS_TRY unsigned int n_nodes = mPn1.size(); //pressure coefficient #pragma omp parallel for for ( int i_node = 0; i_node < n_nodes; i_node++) mCp[i_node] = (mPn1[i_node] - mPinf) / mQinf; mr_matrix_container.WriteScalarToDatabase(PRESSURE_COEFFICIENT, mCp, rNodes); //Mach number #pragma omp parallel for for ( int i_node = 0; i_node < n_nodes; i_node++) mMach[i_node] = norm_2(mvel_n1[i_node]) * sqrt(mC2inv[i_node]); mr_matrix_container.WriteScalarToDatabase(MACH_NUMBER, mMach, rNodes); KRATOS_CATCH("") } //************************************** //function to calculate the area normals void CalculateNormals(ModelPart::ConditionsContainerType& rConditions) //void CalculateNormals(ModelPart::NodesContainerType& rNodes, MatrixContainer& matrix_container) { KRATOS_TRY //calculate area normals face-by-face array_1d<double,3> area_normal; //2D case if(TDim == 2) { 
for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++) CalculateNormal2D(cond_it,area_normal); } //3D case else if(TDim == 3) { //help vectors for cross product array_1d<double,3> v1; array_1d<double,3> v2; for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++) CalculateNormal3D(cond_it,area_normal,v1,v2); } //(re)initialize normals unsigned int n_nodes = mNodalFlag.size(); mSlipNormal.resize(n_nodes); mPressureNormal.resize(n_nodes); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { noalias(mSlipNormal[i_node]) = ZeroVector(TDim); noalias(mPressureNormal[i_node]) = ZeroVector(TDim); } //loop over all faces for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++) { //get geometry data of the face Geometry<Node<3> >& face_geometry = cond_it->GetGeometry(); //boolean variables to characterize faces bool is_slip_condition = true; bool is_pressure_face = true; bool is_velocity_inlet = true; for (unsigned int if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX)); //if the face contains at least 1 node that is not of slip or mixed //then it is not a slip face if ( static_cast<unsigned int>(mNodalFlag[i_node]) != 3 && static_cast<unsigned int>(mNodalFlag[i_node]) != 4) is_slip_condition = false; //if the face contains at least one node of pressure it is a pressure face if ( static_cast<unsigned int>(mNodalFlag[i_node]) != 5 && static_cast<unsigned int>(mNodalFlag[i_node]) != 4) is_pressure_face = false; if (static_cast<unsigned int>(mNodalFlag[i_node]) != 1) is_velocity_inlet = false; } //reference for area normal of the face array_1d<double,3>& face_normal = cond_it->GetValue(NORMAL); double node_factor = 1.0/TDim; //slip condition if (is_slip_condition == true) for (unsigned int 
if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX)); array_1d<double,TDim>& slip_normal = mSlipNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) slip_normal[comp] += node_factor * face_normal[comp]; } //pressure face if (is_pressure_face == true || is_velocity_inlet == true) for (unsigned int if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX)); array_1d<double,TDim>& pressure_normal = mPressureNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) pressure_normal[comp] += node_factor * face_normal[comp]; } //remaining case ... add pressure to pressure nodes and slip to the others if(is_pressure_face == false && is_slip_condition == false && is_velocity_inlet == false) for (unsigned int if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX)); if ( static_cast<unsigned int>(mNodalFlag[i_node]) == 5) //pressure node { array_1d<double,TDim>& pressure_normal = mPressureNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) pressure_normal[comp] += node_factor * face_normal[comp]; } else if ( static_cast<unsigned int>(mNodalFlag[i_node]) == 3) //slip node { array_1d<double,TDim>& slip_normal = mPressureNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) slip_normal[comp] += node_factor * face_normal[comp]; } } } KRATOS_CATCH("") } void SetSpeedOfSound(double c, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY unsigned int n_nodes = mC2inv.size(); double temp = 1.0 / (c * c); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) mC2inv[i_node] = temp; //WriteScalarToDatabase(LIFT_COEFFICIENT, mC2inv, rNodes); KRATOS_CATCH("") } void SetFreeFlowConditions(array_1d<double, 3> velocity, double pressure, double density, double gamma) { 
KRATOS_TRY mUinf = velocity; mPinf = pressure; mRhoinf = density; mGamma = gamma; mQinf = 0.5 * mRhoinf * norm_2(mUinf) * norm_2(mUinf); mMachinf = norm_2(mUinf) / (sqrt(mGamma*mPinf/mRhoinf)); unsigned int n_nodes = mPn1.size(); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) mC2inv[i_node] = mRho[i_node] / (mGamma * mPn1[i_node]); for (unsigned int i_velocity = 0; i_velocity < mVelocityInletList.size(); i_velocity++) noalias(mVelocityInlet[i_velocity]) = velocity; KRATOS_CATCH("") } //********************************************************************** void CalculateVelocity( CalcVectorType& velocity, const CalcVectorType& momentum, const ValuesVectorType& rho) { int loop_size = velocity.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { double inv_rho = 1.0/mRho[i_node]; array_1d<double,TDim>& vel = velocity[i_node]; const array_1d<double,TDim>& mom = momentum[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) vel[comp] = mom[comp] * inv_rho; } } void SetDissipationLength(double h) { KRATOS_TRY mDissipationLength = h; KRATOS_CATCH("") } void CalculateDrag (CalcVectorType& convective_velocity, CalcVectorType& momentum, CalcVectorType& drag, const ValuesVectorType& viscosity) { mViscosity = viscosity; int n_nodes = mViscosity.size(); for ( int i_node = 0; i_node < n_nodes; i_node++) { double dist = mdistances[i_node]; if (dist < 0.0) //node is inside domain ---- if outside do nothing { const array_1d<double, TDim>& a_i = convective_velocity[i_node]; const array_1d<double, TDim>& U_i = momentum[i_node]; array_1d<double, TDim>& Drag_i = drag[i_node]; const double& nu_i = viscosity[i_node]; //porous contribution double eps = mEps[i_node]; double d = mD[i_node]; //diameter of the particle double kinv = 150.0*(1.0-eps)*(1.0-eps)/(eps*eps*eps*d*d); double norm_u_2 = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) norm_u_2 = a_i[comp]*a_i[comp]; //CORRECTED Term double nonlin_term = kinv * nu_i * eps + 1.75 * 
norm_u_2 * sqrt(kinv / ( eps * 150.0)); for (unsigned int comp = 0; comp < TDim; comp++) Drag_i[comp] = nonlin_term * U_i[comp]; } } } //******************************* //function to free dynamic memory void Clear() { KRATOS_TRY mWork.clear(); mvel_n.clear(); mvel_n1.clear(); mA.clear(); mPn.clear(); mPn1.clear(); mHmin.clear(); mHavg.clear(); //mAreaNormal.clear(); //mvel_nitNormal.clear(); mPressureNormal.clear(); mSlipNormal.clear(); mNodalFlag.clear(); mVelocityInletList.clear(); mVelocityInlet.clear(); mPressureOutletList.clear(); mPressureOutlet.clear(); mSlipBoundaryList.clear(); mNoSlipBoundaryList.clear(); mL.clear(); mTauPressure.clear(); mTauConvection.clear(); mViscosity.clear(); mEps.clear(); mEpsOld.clear(); KRATOS_CATCH("") } //****************************************** void CalculateForces() { KRATOS_TRY //variables for node based data handling ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //storage of nodal values in local variables CalcVectorType rhs; rhs.resize(n_nodes); //read velocity and pressure data from Kratos mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes); mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes); mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes); mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes); mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, rNodes); mr_matrix_container.FillOldScalarFromDatabase(DENSITY, mRhoOld, rNodes); mr_matrix_container.FillVectorFromDatabase(BODY_FORCE, mBodyForce, rNodes); mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, rNodes); //read time step size from Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; #pragma omp parallel for for ( int i_node = 0; i_node < n_nodes; i_node++) { // -> mCurrMom //compute the momentum at the current step -> mCurrMom double& rho_i = mRho[i_node]; 
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
array_1d<double, TDim>& U_i = mCurrMom[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
    U_i[comp] = rho_i * u_i[comp];

// -> mInitMom
double& rho_i_old = mRhoOld[i_node];
//compute the momentum at the beginning of the step
const array_1d<double, TDim>& u_i_old = mvel_n[i_node];
array_1d<double, TDim>& U_i_old = mInitMom[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
    U_i_old[comp] = rho_i_old * u_i_old[comp];

//compute volumetric body force (force per unit volume = rho * f)
array_1d<double, TDim>& f_i = mBodyForce[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
    f_i[comp] *= rho_i;
}

//compute advective velocity - area average of the current velocity
CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity);

//compute intrinsic time (stabilization parameters tau)
double time_inv = 1.0/delta_t;
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
    // double& h_i = mHavg[i_node];
    double& h_i = mHmin[i_node];
    array_1d<double, TDim>& a_i = mA[i_node];
    const double nu_i = mViscosity[i_node];

    double vel_norm = norm_2(a_i);
    mTauPressure[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );
    mTauConvection[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );

    //clamp the pressure tau to the range [delta_t, 100*delta_t]
    if (mTauPressure[i_node] < delta_t)
        mTauPressure[i_node] = delta_t;
    else if(mTauPressure[i_node] > 100.0*delta_t)
        mTauPressure[i_node] = 100.0*delta_t;
}

//compute pressure switch (shock capturing), skipped on the very first step
if (mFirstStep == false)
    if(minclude_shock_capturing == true)
        ComputeMonotonicityPreserving();

mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce, mViscosity, rhs);

ValuesVectorType& lumped_mass = mr_matrix_container.GetLumpedMass();

//add inertia term: rhs -= M_L * (U^{n+1} - U^n) / dt
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
    array_1d<double, TDim>& rhs_i = rhs[i_node];
    const array_1d<double, TDim>& curr_mom_i = mCurrMom[i_node];
    const array_1d<double, TDim>& old_mom_i = mInitMom[i_node];
    for (unsigned int comp = 0; comp < TDim; comp++)
        rhs_i[comp]-=time_inv*lumped_mass[i_node]*(curr_mom_i[comp]-old_mom_i[comp]);
    //change of sign
    /* for (unsigned int comp = 0; comp < TDim; comp++)
           rhs_i[comp] = -rhs_i[comp];*/
}

mr_matrix_container.WriteVectorToDatabase(FORCE, rhs, mr_model_part.Nodes());
KRATOS_CATCH("")
}

private:
//edge based data structures (CSR connectivity, edge masses, lumped mass)
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;

bool msmooth_convective_velocity;
bool minclude_shock_capturing;

//nodal values
//velocity vector U at time steps n and n+1
CalcVectorType mWork, mvel_n, mvel_n1, mInitMom, mCurrMom, mFracMom, mx;
//pressure vector p at time steps n and n+1
ValuesVectorType mPn, mPn1, mViscosity;
//monotony preserving term (one value per CSR edge)
ValuesVectorType mBeta;
//density at time steps n+1 and n
ValuesVectorType mRho, mRhoOld;
//compressibility parameter: 1/c^2 per node
ValuesVectorType mC2inv;
double mGamma;
double mQinf;
array_1d<double, TDim> mUinf;
double mPinf;
double mRhoinf;
double mMachinf;
//coefficients (pressure coefficient, Mach number, level-set distance)
ValuesVectorType mCp, mMach, mdistances;
//advective velocity vector
CalcVectorType mA;
//minimum length of the edges surrounding each nodal point
ValuesVectorType mHmin;
ValuesVectorType mHavg;
ValuesVectorType mEps;
ValuesVectorType mEpsOld;
ValuesVectorType mD;
CalcVectorType mEdgeDimensions;
double mDissipationLength;
//area normal
//CalcVectorType mAreaNormal, mvel_nitNormal;
CalcVectorType mPressureNormal, mSlipNormal;
//projection terms
CalcVectorType mPi, mXi;
CalcVectorType mBodyForce, mDrag;
//flag for first time step
bool mFirstStep;
//flag to differentiate interior and boundary nodes
ValuesVectorType mNodalFlag;
//lists of nodes with different types of boundary conditions
IndicesVectorType mSlipBoundaryList, mNoSlipBoundaryList, mPressureOutletList, mVelocityInletList;
IndicesVectorType mDissipationList;
CalcVectorType mVelocityInlet;
ValuesVectorType mPressureOutlet, mDensityInlet;
//list for pressure boundary faces
ModelPart::ConditionsContainerType mPressureFaces;
//intrinsic time step size
ValuesVectorType mTauPressure;
ValuesVectorType mTauConvection;
//variables for resolving pressure equation
//laplacian matrix
TSystemMatrixType mL;

//***********************************************************
//functions to calculate area normals for boundary conditions
//2D: edge normal (edge vector rotated by -90 degrees); its length equals
//the edge length; stored in the condition's NORMAL value
void CalculateNormal2D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double,3>& area_normal)
{
    Geometry<Node<3> >& face_geometry = (cond_it)->GetGeometry();

    area_normal[0] = face_geometry[1].Y() - face_geometry[0].Y();
    area_normal[1] = - (face_geometry[1].X() - face_geometry[0].X());
    area_normal[2] = 0.00;

    noalias((cond_it)->GetValue(NORMAL)) = area_normal;
}

//3D: triangle normal -0.5*(v1 x v2); its length equals the face area;
//stored in the condition's NORMAL value
void CalculateNormal3D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double,3>& area_normal, array_1d<double,3>& v1,array_1d<double,3>& v2 )
{
    Geometry<Node<3> >& face_geometry = (cond_it)->GetGeometry();

    v1[0] = face_geometry[1].X() - face_geometry[0].X();
    v1[1] = face_geometry[1].Y() - face_geometry[0].Y();
    v1[2] = face_geometry[1].Z() - face_geometry[0].Z();

    v2[0] = face_geometry[2].X() - face_geometry[0].X();
    v2[1] = face_geometry[2].Y() - face_geometry[0].Y();
    v2[2] = face_geometry[2].Z() - face_geometry[0].Z();

    MathUtils<double>::CrossProduct(area_normal,v1,v2);
    area_normal *= -0.5;

    noalias((cond_it)->GetValue(NORMAL)) = area_normal;
}

//******************************************
//function to calculate advective velocities
//If smooth_convective_velocity is true the advective velocity of each node
//is the consistent-mass-weighted average of its own and its neighbours'
//velocities (with the lumped-mass correction on Dirichlet boundary nodes);
//otherwise the velocity is simply copied.
void CalculateAdvectiveVelocity(const CalcVectorType& rVelocity, CalcVectorType& rAdvectiveVelocity, bool smooth_convective_velocity)
{
    KRATOS_TRY
    if(smooth_convective_velocity == true)
    {
        //get number of nodes
        int n_nodes = rVelocity.size();

        //initialize advective velocities
        /* #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
            noalias(rAdvectiveVelocity[i_node]) = ZeroVector(TDim);*/

        //loop over all nodes
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            //reference for advective velocity of node i
            array_1d<double, TDim>& a_i = rAdvectiveVelocity[i_node];
            noalias(a_i) = ZeroVector(TDim);

            //setting weighting mass to zero
            double mass_sum = 0.0;
            //loop over all neighbours
            for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
            {
                //add consistent mass of edge ij to denominator
                double& m_ij = mr_matrix_container.GetEdgeValues()[csr_index].Mass;
                mass_sum += m_ij;
                //reference for velocity of neighbouring node j
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                const array_1d<double, TDim>& u_j = rVelocity[j_neighbour];
                //add contributions of numerator componentwisely
                for (unsigned int comp = 0; comp < TDim; comp++)
                    a_i[comp] += m_ij * u_j[comp];
            }

            //for Dirichlet boundary nodes lumped values have to be included
            //attention: nodes with Neumann pressure condition are treated as interior points!
            if ((static_cast<unsigned int>(mNodalFlag[i_node]) != 0) && (static_cast<unsigned int>(mNodalFlag[i_node]) != 5) && (static_cast<unsigned int>(mNodalFlag[i_node]) != 4))
            {
                //taking into account diagonal matrix elements
                double m_ii = mr_matrix_container.GetLumpedMass()[i_node] - mass_sum;
                const array_1d<double, TDim>& u_i = rVelocity[i_node];
                //add contribution to advective velocity
                for (unsigned int comp = 0; comp < TDim; comp++)
                    a_i[comp] += m_ii * u_i[comp];
                //add contribution to mass sum
                mass_sum += m_ii;
            }

            //weighting contributions by the mass sum of all (surrounding) edges
            for (unsigned int comp = 0; comp < TDim; comp++)
                a_i[comp] /= mass_sum;
        }
    }
    else
    {
        //no smoothing: advective velocity is a plain copy of the velocity
        int n_nodes = rVelocity.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            array_1d<double, TDim>& aaa = rAdvectiveVelocity[i_node];
            const array_1d<double, TDim>& u_i = rVelocity[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                aaa[comp] = u_i[comp];
        }
        // noalias(rAdvectiveVelocity[i_node]) = mvel_n1[i_node];
    }
    // for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    //     noalias(rAdvectiveVelocity[i_node]) = mvel_n1[i_node];
    KRATOS_CATCH("")
}

//*********************************************************
//function to calculate minimum length of surrounding edges
//Fills mHmin (from the matrix container), mHavg (from the lumped masses,
//interpreted as nodal areas/volumes) and mEdgeDimensions (edge vectors).
void CalculateEdgeLengths(ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    //get number of nodes
    unsigned int n_nodes = rNodes.size();
    //reserve memory for storage of nodal coordinates
    std::vector< array_1d<double, 3> > position;
    position.resize(n_nodes);

    //get position of all nodes
    for (typename ModelPart::NodesContainerType::iterator node_it=rNodes.begin(); node_it!=rNodes.end(); node_it++)
    {
        //get the global index of the node
        unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
        //save its coordinates locally
        noalias(position[i_node]) = node_it->Coordinates();

        //initialize minimum edge length with relatively big values
        mHmin[i_node] = 1e10;
    }

    ValuesVectorType& aaa = mr_matrix_container.GetHmin();
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        mHmin[i_node] = aaa[i_node];
    }

    //take unstructured meshes into account:
    //average length from the nodal lumped mass (area in 2D, volume in 3D)
    if(TDim == 2)
    {
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& h_i = mHavg[i_node];
            double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
            // double& rho_i = mRho[i_node];

            h_i = sqrt(2.0*m_i);
        }
    }
    else if(TDim == 3)
    {
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& h_i = mHavg[i_node];
            double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
            // double& rho_i = mRho[i_node];

            h_i = pow (6.0*m_i, 1.0/3.0);
        }
    }

    //compute edge coordinates
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, 3>& pos_i = position[i_node];

        for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            array_1d<double, 3>& pos_j = position[j_neighbour];

            array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
            for (unsigned int comp = 0; comp < TDim; comp++)
                l_k[comp] = pos_i[comp] - pos_j[comp];
        }
    }
    KRATOS_CATCH("")
}

//*******************************************************
//function to calculate monotonicity preserving term beta
//For every edge ij compares the pressure jump with the averaged projected
//pressure gradient (mXi) and stores a switch mBeta in [0,1]: ~0 in smooth
//regions, ~1 where the pressure is non-smooth.
void ComputeMonotonicityPreserving()
{
    KRATOS_TRY
    unsigned int n_nodes = mPn1.size();
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        double& p_i = mPn1[i_node];
        array_1d<double, TDim>& xi_i = mXi[i_node];

        for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            double& p_j = mPn1[j_neighbour];
            array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
            array_1d<double, TDim>& xi_j = mXi[j_neighbour];

            double press_diff = p_i - p_j;
            double proj_sum = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
                proj_sum += l_k[comp] * (xi_i[comp] + xi_j[comp]);
            proj_sum *= 0.5;

            double temp = fabs(press_diff) + fabs(proj_sum);
            if (temp <= 1e-10)
                mBeta[csr_index] = 1.0;
            else
                // mBeta[csr_index] = 1.0 - fabs(fabs(press_diff) - fabs(proj_sum)) / temp;
                mBeta[csr_index] = 1.0 - fabs(press_diff + proj_sum) / temp;
            /*mBeta[csr_index]=1.0;*/
            /* if (mNodalFlag[i_node] == 1.0 || mNodalFlag[i_node] == 4.0 || mNodalFlag[i_node] == 5.0 || mNodalFlag[j_neighbour] == 1.0 || mNodalFlag[j_neighbour] == 4.0 || mNodalFlag[j_neighbour] == 5.0) mBeta[csr_index] = 0.0;*/
            /*if (mBeta[csr_index]<0.0 && mBeta[csr_index]>1.0) KRATOS_WATCH(mBeta[csr_index]);*/
        }
    }
    KRATOS_CATCH("")
}

//Stabilization parameter of an edge, built from the averaged element size
//and the averaged velocity of its two end nodes.
inline double CalculateEdgeTau( const double time_inv, const double h_i, const array_1d<double,TDim>& v_i, const double h_j, const array_1d<double,TDim>& v_j)
{
    double h_avg = 0.5 * (h_i+h_j);

    //calculate the norm of the averaged velocity 0.5*(v_i + v_j)
    double norm_avg = 0.0;
    for(unsigned int k=0; k<TDim; k++)
        norm_avg += pow(v_i[k] + v_j[k],2);
    norm_avg *= 0.25;
    norm_avg = sqrt(norm_avg);

    return 1.0 / (2.0 * norm_avg/h_avg + time_inv + 1e-6 /(h_avg*h_avg) );
}

//destination = origin / porosity, component by component
void DivideByPorosity(CalcVectorType& r_destination,const CalcVectorType& r_origin, const ValuesVectorType& porosity)
{
    int n_nodes = r_origin.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& dest = r_destination[i_node];
        const array_1d<double, TDim>& orig = r_origin[i_node];
        double factor = 1.0/porosity[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            dest[comp] = factor * orig[comp];
    }
}

//destination = origin * porosity, component by component
void MultiplyByPorosity(CalcVectorType& r_destination, const CalcVectorType& r_origin, const ValuesVectorType& porosity)
{
    int n_nodes = r_origin.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& dest = r_destination[i_node];
        const array_1d<double, TDim>& orig = r_origin[i_node];
        double factor = porosity[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            dest[comp] = factor * orig[comp];
    }
}

};

} //namespace Kratos

#endif //KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED defined
box_coder_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace paddle {
namespace operators {

// Coding mode of the box_coder op: encode corner boxes into center-size
// offsets relative to prior boxes, or decode such offsets back to corners.
enum class BoxCodeType { kEncodeCenterSize = 0, kDecodeCenterSize = 1 };

// Maps the 'code_type' attribute string onto BoxCodeType; any other value
// triggers an InvalidArgument enforce error.
inline BoxCodeType GetBoxCodeType(const std::string &type) {
  PADDLE_ENFORCE_EQ(
      (type == "encode_center_size") || (type == "decode_center_size"), true,
      platform::errors::InvalidArgument(
          "The 'code_type' attribute in BoxCoder"
          " must be 'encode_center_size' or 'decode_center_size'. "
          "But received 'code_type' is %s",
          type));
  if (type == "encode_center_size") {
    return BoxCodeType::kEncodeCenterSize;
  } else {
    return BoxCodeType::kDecodeCenterSize;
  }
}

template <typename DeviceContext, typename T>
class BoxCoderKernel : public framework::OpKernel<T> {
 public:
  // Encodes target boxes (corner form [xmin, ymin, xmax, ymax]) against
  // prior boxes into center-size offsets:
  //   output[i][j] = [(tx-px)/pw, (ty-py)/ph, log(tw/pw), log(th/ph)]
  // optionally divided by the per-prior variance tensor or the 'variance'
  // attribute. When `normalized` is false, widths/heights get +1
  // (pixel-inclusive convention). `output` has row*col*len elements.
  void EncodeCenterSize(const framework::Tensor *target_box,
                        const framework::Tensor *prior_box,
                        const framework::Tensor *prior_box_var,
                        const bool normalized, const std::vector<float> variance,
                        T *output) const {
    int64_t row = target_box->dims()[0];
    int64_t col = prior_box->dims()[0];
    int64_t len = prior_box->dims()[1];

#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
    for (int64_t i = 0; i < row; ++i) {
      for (int64_t j = 0; j < col; ++j) {
        auto *target_box_data = target_box->data<T>();
        auto *prior_box_data = prior_box->data<T>();
        size_t offset = i * col * len + j * len;
        // (normalized == false) adds 1 to widths/heights for unnormalized boxes
        T prior_box_width = prior_box_data[j * len + 2] -
                            prior_box_data[j * len] + (normalized == false);
        T prior_box_height = prior_box_data[j * len + 3] -
                             prior_box_data[j * len + 1] +
                             (normalized == false);
        T prior_box_center_x = prior_box_data[j * len] + prior_box_width / 2;
        T prior_box_center_y =
            prior_box_data[j * len + 1] + prior_box_height / 2;

        T target_box_center_x =
            (target_box_data[i * len + 2] + target_box_data[i * len]) / 2;
        T target_box_center_y =
            (target_box_data[i * len + 3] + target_box_data[i * len + 1]) / 2;
        T target_box_width = target_box_data[i * len + 2] -
                             target_box_data[i * len] + (normalized == false);
        T target_box_height = target_box_data[i * len + 3] -
                              target_box_data[i * len + 1] +
                              (normalized == false);

        output[offset] =
            (target_box_center_x - prior_box_center_x) / prior_box_width;
        output[offset + 1] =
            (target_box_center_y - prior_box_center_y) / prior_box_height;
        // fabs guards against degenerate (non-positive) box extents in log()
        output[offset + 2] =
            std::log(std::fabs(target_box_width / prior_box_width));
        output[offset + 3] =
            std::log(std::fabs(target_box_height / prior_box_height));
      }
    }

    // Normalize the encoded offsets by the variance, if one is given:
    // either a per-prior-box tensor (PriorBoxVar) or the 4-element attribute.
    if (prior_box_var) {
      const T *prior_box_var_data = prior_box_var->data<T>();
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(3)
#endif
      for (int64_t i = 0; i < row; ++i) {
        for (int64_t j = 0; j < col; ++j) {
          for (int k = 0; k < 4; ++k) {
            size_t offset = i * col * len + j * len;
            int prior_var_offset = j * len;
            output[offset + k] /= prior_box_var_data[prior_var_offset + k];
          }
        }
      }
    } else if (!(variance.empty())) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(3)
#endif
      for (int64_t i = 0; i < row; ++i) {
        for (int64_t j = 0; j < col; ++j) {
          for (int k = 0; k < 4; ++k) {
            size_t offset = i * col * len + j * len;
            output[offset + k] /= static_cast<T>(variance[k]);
          }
        }
      }
    }
  }

  // Decodes center-size offsets back into corner boxes.
  // Template parameters:
  //   axis     - 0: priors are broadcast along rows, 1: along columns
  //   var_size - 2: per-prior variance tensor, 1: 'variance' attribute,
  //              0: no variance (all ones)
  // NOTE(review): for var_size == 1 the code reinterpret_casts
  // std::vector<float>::data() to T*; this is only correct when T is float —
  // confirm which types this kernel is registered for.
  template <int axis, int var_size>
  void DecodeCenterSize(const framework::Tensor *target_box,
                        const framework::Tensor *prior_box,
                        const framework::Tensor *prior_box_var,
                        const bool normalized, std::vector<float> variance,
                        T *output) const {
    int64_t row = target_box->dims()[0];
    int64_t col = target_box->dims()[1];
    int64_t len = target_box->dims()[2];

#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
    for (int64_t i = 0; i < row; ++i) {
      for (int64_t j = 0; j < col; ++j) {
        auto *target_box_data = target_box->data<T>();
        auto *prior_box_data = prior_box->data<T>();

        T var_data[4] = {1., 1., 1., 1.};
        T *var_ptr = var_data;
        size_t offset = i * col * len + j * len;
        // which prior box this output element pairs with depends on axis
        int prior_box_offset = axis == 0 ? j * len : i * len;

        T prior_box_width = prior_box_data[prior_box_offset + 2] -
                            prior_box_data[prior_box_offset] +
                            (normalized == false);
        T prior_box_height = prior_box_data[prior_box_offset + 3] -
                             prior_box_data[prior_box_offset + 1] +
                             (normalized == false);
        T prior_box_center_x =
            prior_box_data[prior_box_offset] + prior_box_width / 2;
        T prior_box_center_y =
            prior_box_data[prior_box_offset + 1] + prior_box_height / 2;

        T target_box_center_x = 0, target_box_center_y = 0;
        T target_box_width = 0, target_box_height = 0;
        int prior_var_offset = axis == 0 ? j * len : i * len;
        if (var_size == 2) {
          std::memcpy(var_ptr, prior_box_var->data<T>() + prior_var_offset,
                      4 * sizeof(T));
        } else if (var_size == 1) {
          var_ptr = reinterpret_cast<T *>(variance.data());
        }
        T box_var_x = *var_ptr;
        T box_var_y = *(var_ptr + 1);
        T box_var_w = *(var_ptr + 2);
        T box_var_h = *(var_ptr + 3);

        // inverse of the encode transform
        target_box_center_x =
            box_var_x * target_box_data[offset] * prior_box_width +
            prior_box_center_x;
        target_box_center_y =
            box_var_y * target_box_data[offset + 1] * prior_box_height +
            prior_box_center_y;
        target_box_width =
            std::exp(box_var_w * target_box_data[offset + 2]) * prior_box_width;
        target_box_height = std::exp(box_var_h * target_box_data[offset + 3]) *
                            prior_box_height;

        output[offset] = target_box_center_x - target_box_width / 2;
        output[offset + 1] = target_box_center_y - target_box_height / 2;
        output[offset + 2] =
            target_box_center_x + target_box_width / 2 - (normalized == false);
        output[offset + 3] =
            target_box_center_y + target_box_height / 2 - (normalized == false);
      }
    }
  }

  // Op entry point: validates inputs/attributes and dispatches to the
  // encode kernel or to the statically specialized decode kernels.
  void Compute(const framework::ExecutionContext &context) const override {
    auto *prior_box = context.Input<framework::Tensor>("PriorBox");
    auto *prior_box_var = context.Input<framework::Tensor>("PriorBoxVar");
    auto *target_box = context.Input<framework::LoDTensor>("TargetBox");
    auto *output_box = context.Output<framework::Tensor>("OutputBox");
    std::vector<float> variance = context.Attr<std::vector<float>>("variance");
    const int axis = context.Attr<int>("axis");
    if (target_box->lod().size()) {
      PADDLE_ENFORCE_EQ(target_box->lod().size(), 1UL,
                        platform::errors::InvalidArgument(
                            "Input(TargetBox) of BoxCoder operator "
                            "supports LoD with only one level. But received "
                            "level = %d",
                            target_box->lod().size()));
    }
    // PriorBoxVar and the 'variance' attribute are mutually exclusive
    if (prior_box_var) {
      PADDLE_ENFORCE_EQ(variance.empty(), true,
                        platform::errors::InvalidArgument(
                            "Input 'PriorBoxVar' and attribute 'variance' "
                            "of BoxCoder operator should not be used at the "
                            "same time."));
    }
    if (!(variance.empty())) {
      PADDLE_ENFORCE_EQ(static_cast<int>(variance.size()), 4,
                        platform::errors::InvalidArgument(
                            "Size of attribute 'variance' of BoxCoder "
                            "operator should be 4. But received "
                            "size = %d",
                            variance.size()));
    }
    auto code_type = GetBoxCodeType(context.Attr<std::string>("code_type"));
    bool normalized = context.Attr<bool>("box_normalized");

    auto row = target_box->dims()[0];
    auto col = prior_box->dims()[0];
    if (code_type == BoxCodeType::kDecodeCenterSize) {
      col = target_box->dims()[1];
    }
    auto len = prior_box->dims()[1];

    output_box->mutable_data<T>({row, col, len}, context.GetPlace());

    T *output = output_box->data<T>();
    if (code_type == BoxCodeType::kEncodeCenterSize) {
      EncodeCenterSize(target_box, prior_box, prior_box_var, normalized,
                       variance, output);
    } else if (code_type == BoxCodeType::kDecodeCenterSize) {
      // select the statically specialized decode variant
      if (prior_box_var) {
        if (axis == 0) {
          DecodeCenterSize<0, 2>(target_box, prior_box, prior_box_var,
                                 normalized, variance, output);
        } else {
          DecodeCenterSize<1, 2>(target_box, prior_box, prior_box_var,
                                 normalized, variance, output);
        }
      } else if (!(variance.empty())) {
        if (axis == 0) {
          DecodeCenterSize<0, 1>(target_box, prior_box, prior_box_var,
                                 normalized, variance, output);
        } else {
          DecodeCenterSize<1, 1>(target_box, prior_box, prior_box_var,
                                 normalized, variance, output);
        }
      } else {
        if (axis == 0) {
          DecodeCenterSize<0, 0>(target_box, prior_box, prior_box_var,
                                 normalized, variance, output);
        } else {
          DecodeCenterSize<1, 0>(target_box, prior_box, prior_box_var,
                                 normalized, variance, output);
        }
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle
ktuple_pair.c
/* -*- mode: c; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /********************************************************************* * Clustal Omega - Multiple sequence alignment * * Copyright (C) 2010 University College Dublin * * Clustal-Omega is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This file is part of Clustal-Omega. * ********************************************************************/ /* * RCS $Id: ktuple_pair.c 230 2011-04-09 15:37:50Z andreas $ * * * K-Tuple code for pairwise alignment (Wilbur and Lipman, 1983; PMID * 6572363). Most code taken from showpair.c (Clustal 1.83) * DD: some functions now have lots of parameters as static variables * were removed to make code OpenMP-friendly * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include <math.h> #include <assert.h> #ifdef HAVE_OPENMP #include <omp.h> #endif #include "squid/squid.h" #include "util.h" #include "symmatrix.h" #include "ktuple_pair.h" #include "log.h" #include "progress.h" #define END_MARK -3 /* see interface.c in 1.83 */ #define NUMRES 32 /* max size of comparison matrix */ /* see notes below */ #undef SORT_LAST_ELEMENT_AS_WELL /* gap_pos1 = NUMRES-2; /@ code for gaps inserted by clustalw @/ */ static const int GAP_POS2 = NUMRES-1; /* code for gaps already in alignment */ static bool DNAFLAG = FALSE; static const char *AMINO_ACID_CODES = "ABCDEFGHIKLMNPQRSTUVWXYZ-"; static const char *NUCLEIC_ACID_CODES = "ACGTUN-"; /* As far as I understand the gap symbol should not be necessary here, * because we use isgap for testing later anyway. But changing this, * will affect max_res_code and max_nuc as well. So I leave it for now * as it is. 
   AW */

/* If TRUE, the raw k-tuple score is normalised by the length of the
 * shorter sequence and expressed as a percentage (see KTuplePairDist). */
static bool percent = TRUE;

/* forward declarations of the file-local helpers */
static void make_ptrs(int *tptr, int *pl, const int naseq, const int l,
                      const int ktup, const int max_res_code, char **seq_array);
static void put_frag(const int fs, const int v1, const int v2, const int flen,
                     const int curr_frag, int *next, int *maxsf, int **accum);
static bool frag_rel_pos(int a1, int b1, int a2, int b2, int ktup);
static void des_quick_sort(int *array1, int *array2, const int array_size);
static void pair_align(int seq_no, int l1, int l2, int max_res_code,
                       ktuple_param_t *aln_param, char **seq_array, int *maxsf,
                       int **accum, int max_aln_length,
                       int *zza, int *zzb, int *zzc, int *zzd);
static void encode(char *seq, char *naseq, int l, const char *res_codes);
static int res_index(const char *lookup, char c);

typedef struct {
    int i1;
    int i2;
} two_ints_t;

/* default ktuple pairwise alignment parameters */

/* protein */
/* designated initializer */
const ktuple_param_t default_protein_param = {
    .ktup = 1,
    .wind_gap = 3,
    .signif = 5,
    .window = 5,
};

/* dna */
/* designated initializer */
const ktuple_param_t default_dna_param = {
    .ktup = 2,
    .wind_gap = 5,
    .signif = 4,
    .window = 4,
};

/**
 * Translate a residue string into integer residue codes.
 *
 * seq:       input residue string; read from index 1, i.e. the caller passes
 *            a pointer one element BEFORE the first residue (unit-offset)
 * naseq:     output code array, unit-offset; naseq[l+1] is set to END_MARK
 * l:         number of residues
 * res_codes: alphabet string; a residue's code is its index in this string
 *
 * Gaps become GAP_POS2; characters not found in res_codes become -1 and
 * trigger a single LOG_WARN for the sequence.
 *
 * note: naseq should be unit-offset
 *
 * NOTE(review): toupper(seq[i]) on a plain (possibly signed) char is
 * implementation-defined/UB for negative values per <ctype.h> — assumes
 * 7-bit ASCII input; confirm upstream sanitisation.
 */
static void encode(char *seq, char *naseq, int l, const char *res_codes)
{
    /* code seq as ints ..
       use GAP_POS2 for gap */
    register int i;
    bool seq_contains_unknown_char = FALSE;
    /*LOG_DEBUG("seq=%s naseq=%p l=%d", &(seq[1]), naseq, l); */
    for (i=1; i<=l; i++) {
        char res = toupper(seq[i]);
        if (isgap(res)) {
            naseq[i] = GAP_POS2; /* gap in input */
        } else {
            naseq[i] = res_index(res_codes, res);
        }
        /*LOG_DEBUG("Character '%c' at pos %d", res, i);*/
        if (-1 == naseq[i]) {
            /* remember and warn once after the loop, not per character */
            seq_contains_unknown_char = TRUE;
            /*LOG_DEBUG("Unknown character '%c' at pos %d", res, i);*/
        }
        /*LOG_DEBUG("na_seq[%d]=%d", i, naseq[i]);*/
    }
    if (TRUE == seq_contains_unknown_char)
        Log(&rLog, LOG_WARN, "Unknown character in seq '%s'", &(seq[1]));
    naseq[i] = END_MARK;  /* terminator sentinel after the last residue */
    return;
} /* end of encode */

/**
 * Return the index of character c in string t, or -1 if not present.
 * Used as residue -> integer code lookup against the alphabet strings.
 */
static int res_index(const char *t, char c)
{
    register int i;
    for (i=0; t[i] && t[i] != c; i++)
        ;
    if (t[i]) {
        return (i);
    } else {
        return -1;
    }
} /* end of res_index */

/**
 * Build the k-tuple hash tables for one sequence.
 *
 * tptr: per-position chain (tptr[i] = previous position with the same
 *       k-tuple code, 0 terminates the chain), unit-offset
 * pl:   head table indexed by k-tuple code (1..(max_res_code+1)^ktup)
 * naseq: index of the sequence in seq_array
 * l:    sequence length
 *
 * NOTE(review): `a` is a static array written on every call; under OpenMP
 * concurrent callers race on it (all writers store identical values for a
 * given ktup/max_res_code, but it is still a data race) — confirm callers
 * serialise or thread-privatise this.
 */
static void make_ptrs(int *tptr, int *pl, const int naseq, const int l,
                      const int ktup, const int max_res_code, char **seq_array)
{
    /* FIXME make 10 a constant and give it a nice name */
    static int a[10];  /* a[j] = (max_res_code+1)^(j-1), positional weights */
    int i, j, code, flag;
    char residue;
    const int limit = (int) pow((double)(max_res_code+1),(double)ktup);

    for (i=1;i<=ktup;i++)
        a[i] = (int) pow((double)(max_res_code+1),(double)(i-1));

    for (i=1; i<=limit; ++i)
        pl[i]=0;
    for (i=1; i<=l; ++i)
        tptr[i]=0;

    for (i=1; i<=(l-ktup+1); ++i) {
        code=0;
        flag=FALSE;
        for (j=1; j<=ktup; ++j) {
            /* Log(&rLog, LOG_FORCED_DEBUG, "naseq=%d i=%d j=%d seq_array[naseq]=%p",
             *  naseq, i, j, seq_array[naseq]); */
            residue = seq_array[naseq][i+j-1];
            /* Log(&rLog, LOG_FORCED_DEBUG, "residue = %d", residue); */
            if ((residue<0) || (residue > max_res_code)){
                /* tuple contains a gap or unknown residue: skip it */
                flag=TRUE;
                break;
            }
            code += ((residue) * a[j]);
        }
        if (flag)
            continue;
        ++code;  /* codes are 1-based so 0 can mean "empty" in pl/tptr */
        if (0 != pl[code])
            tptr[i] =pl[code];
        pl[code] = i;
    }
    return;
} /* end of make_ptrs */

/**
 *
 * FIXME Why hardcoding of 5?
 */
/**
 * Insert a fragment into the score-ordered fragment list.
 *
 * accum rows (column index = fragment number):
 *   accum[0] = score (fs), accum[1] = end pos in seq1 (v1),
 *   accum[2] = end pos in seq2 (v2), accum[3] = flen (back-link),
 *   accum[4] = next fragment in a list sorted by descending score.
 * *maxsf is the head (highest-scoring fragment) of that list.
 */
static void put_frag(const int fs, const int v1, const int v2, const int flen,
                     const int curr_frag, int *next, int *maxsf, int **accum)
{
    int end;
    accum[0][curr_frag]=fs;
    accum[1][curr_frag]=v1;
    accum[2][curr_frag]=v2;
    accum[3][curr_frag]=flen;

    if (!*maxsf) {
        /* first fragment ever: becomes the list head */
        *maxsf=1;
        accum[4][curr_frag]=0;
        return;
    }

    if (fs >= accum[0][*maxsf]) {
        /* new best score: prepend and update the head */
        accum[4][curr_frag]=*maxsf;
        *maxsf=curr_frag;
        return;
    } else {
        /* walk the descending-score list to the insertion point */
        *next=*maxsf;
        while (TRUE) {
            end=*next;
            *next=accum[4][*next];
            if (fs>=accum[0][*next])
                break;
        }
        accum[4][curr_frag]=*next;
        accum[4][end]=curr_frag;
    }
    return;
} /* end of put_frag */

/**
 * TRUE if fragment 2 (ending at a2/b2) may legally precede fragment 1
 * (starting at a1/b1): either strictly earlier on the same diagonal, or on
 * a different diagonal with no overlap within the k-tuple length.
 */
static bool frag_rel_pos(int a1, int b1, int a2, int b2, int ktup)
{
    if (a1-b1 == a2-b2) {
        /* same diagonal */
        if (a2<a1) {
            return TRUE;
        }
    } else {
        if (a2+ktup-1<a1 && b2+ktup-1<b1) {
            return TRUE;
        }
    }
    return FALSE;
} /* end of frag_rel_pos */

/**
 *
 * @note: This is together with des_quick_sort most time consuming
 * routine according to gprof on r110. Tried to replace it with qsort
 * and/or QSortAndTrackIndex(), which is always slower! So we keep the
 * original.
 *
 * Original doc: Quicksort routine, adapted from chapter 4, page 115
 * of software tools by Kernighan and Plauger, (1986). Sort the
 * elements of array1 and sort the elements of array2 accordingly
 *
 * There might be a bug here. The original function apparently never
 * touches the last element and keeps it as is. Tried to fix this (see
 * SORT_LAST_ELEMENT_AS_WELL) which gives slightly worse performance
 * (-0.5% on BB). My fix might not be working or it's not a bug at
 * all...
 *
 * Arrays are unit-offset; lst/ust are explicit stacks replacing
 * recursion, so array_size must stay below 2^50 elements.
 */
static void des_quick_sort(int *array1, int *array2, const int array_size)
{
    int temp1, temp2;
    int p, pivlin;
    int i, j;
    int lst[50], ust[50];       /* the maximum no. of elements must be*/
                                /* < log(base2) of 50 */

#if 0
    for (i=1; i<=array_size; i++) {
        Log(&rLog, LOG_FORCED_DEBUG, "b4 sort array1[%d]=%d array2[%d]=%d", i, array1[i], i, array2[i]);
    }
#endif

    lst[1] = 1;
#ifdef SORT_LAST_ELEMENT_AS_WELL
    ust[1] = array_size;
#else
    /* original */
    ust[1] = array_size-1;
#endif
    p = 1;

    while (p > 0) {
        if (lst[p] >= ust[p]) {
            p--;                /* partition fully sorted: pop the stack */
        } else {
            i = lst[p] - 1;
            j = ust[p];
            pivlin = array1[j];  /* pivot = last element of the partition */
            while (i < j) {
                for (i=i+1; array1[i] < pivlin; i++)
                    ;
                for (j=j-1; j > i; j--)
                    if (array1[j] <= pivlin)
                        break;
                if (i < j) {
                    /* swap both arrays in lock-step */
                    temp1     = array1[i];
                    array1[i] = array1[j];
                    array1[j] = temp1;

                    temp2     = array2[i];
                    array2[i] = array2[j];
                    array2[j] = temp2;
                }
            }

            /* move the pivot into its final slot */
            j = ust[p];

            temp1     = array1[i];
            array1[i] = array1[j];
            array1[j] = temp1;

            temp2     = array2[i];
            array2[i] = array2[j];
            array2[j] = temp2;

            /* push the larger sub-partition, recurse into the smaller */
            if (i-lst[p] < ust[p] - i) {
                lst[p+1] = lst[p];
                ust[p+1] = i - 1;
                lst[p]   = i + 1;
            } else {
                lst[p+1] = i + 1;
                ust[p+1] = ust[p];
                ust[p]   = i - 1;
            }
            p = p + 1;
        }
    }

#if 0
    for (i=1; i<=array_size; i++) {
        Log(&rLog, LOG_FORCED_DEBUG, "after sort array1[%d]=%d array2[%d]=%d", i, array1[i], i, array2[i]);
    }
#endif
    return;
} /* end of des_quick_sort */

/**
 * Wilbur-Lipman k-tuple alignment of sequence seq_no (length l1) against
 * the sequence whose hash tables are in zzb/zzd (length l2).
 *
 * zza/zzc: tptr/pl tables of sequence 1 (see make_ptrs)
 * zzb/zzd: tptr/pl tables of sequence 2
 * On return *maxsf is the head of the fragment list in accum (0 if no
 * fragment found); accum[0][*maxsf] is the best score.
 *
 * FIXME together with des_quick_sort most time consuming routine
 * according to gprof on r110
 */
static void pair_align(int seq_no, int l1, int l2, int max_res_code,
                       ktuple_param_t *aln_param, char **seq_array, int *maxsf,
                       int **accum, int max_aln_length,
                       int *zza, int *zzb, int *zzc, int *zzd)
{
    int next; /* forrmerly static */
    int pot[8],i, j, l, m, flag, limit, pos, vn1, vn2, flen, osptr, fs;
    int tv1, tv2, encrypt, subt1, subt2, rmndr;
    char residue;
    int *diag_index;
    int *displ;
    char *slopes;
    int curr_frag;
    const int tl1 = (l1+l2)-1;  /* number of diagonals */

    assert(NULL!=aln_param);

    /* Log(&rLog, LOG_FORCED_DEBUG, "DNAFLAG=%d seq_no=%d l1=%d l2=%d window=%d ktup=%d signif=%d wind_gap=%d",
       DNAFLAG, seq_no, l1, l2, window, ktup, signif, wind_gap); */

    slopes = (char *) CKCALLOC(tl1+1, sizeof(char));
    displ = (int *) CKCALLOC(tl1+1, sizeof(int));
    diag_index = (int *) CKMALLOC((tl1+1) * sizeof(int));

    for (i=1; i<=tl1; ++i) {
        /* unnecessary, because we calloced: slopes[i] = displ[i] = 0; */
        diag_index[i] = i;
    }

    /* pot[j] = (max_res_code+1)^(j-1): positional weights of a k-tuple */
    for (i=1;i<=aln_param->ktup;i++)
        pot[i] = (int) pow((double)(max_res_code+1),(double)(i-1));
    limit = (int) pow((double)(max_res_code+1),(double)aln_param->ktup);

    /* increment diagonal score for each k_tuple match */
    for (i=1; i<=limit; ++i) {
        vn1=zzc[i];
        while (TRUE) {
            if (!vn1)
                break;
            vn2 = zzd[i];
            while (0 != vn2) {
                osptr = vn1-vn2+l2;  /* diagonal index, 1..tl1 */
                ++displ[osptr];
                vn2 = zzb[vn2];
            }
            vn1=zza[vn1];
        }
    }

    /* choose the top SIGNIF diagonals */
#ifdef QSORT_REPLACEMENT
    /* This was an attempt to replace des_quick_sort with qsort(),
     * which turns out to be much slower, so don't use this */

    /* FIXME: if we use this branch, we don't need to init diag_index
     * before, because that is done in QSortAndTrackIndex()
     * automatically.
     */
#if 0
    for (i=1; i<=tl1; i++) {
        Log(&rLog, LOG_FORCED_DEBUG, "b4 sort disp[%d]=%d diag_index[%d]=%d", i, diag_index[i], i, displ[i]);
    }
#endif

    QSortAndTrackIndex(&(diag_index[1]), &(displ[1]), tl1, 'a', TRUE);

#if 0
    for (i=1; i<=tl1; i++) {
        Log(&rLog, LOG_FORCED_DEBUG, "after sort disp[%d]=%d diag_index[%d]=%d", i, diag_index[i], i, displ[i]);
    }
#endif

#else
    des_quick_sort(displ, diag_index, tl1);
#endif

    j = tl1 - aln_param->signif + 1;
    if (j < 1) {
        j = 1;
    }

    /* flag all diagonals within WINDOW of a top diagonal */
    for (i=tl1; i>=j; i--) {
        if (displ[i] > 0) {
            pos = diag_index[i];
            l = (1 > pos - aln_param->window) ? 1 : pos - aln_param->window;
            m = (tl1 < pos + aln_param->window) ? tl1 : pos + aln_param->window;
            for (; l <= m; l++)
                slopes[l] = 1;
        }
    }

    /* displ is reused below as "last fragment seen on this diagonal" */
    for (i=1; i<=tl1; i++) {
        displ[i] = 0;
    }

    curr_frag=*maxsf=0;

    /* scan seq1's k-tuples and chain matches on flagged diagonals */
    for (i=1; i<=(l1-aln_param->ktup+1); ++i) {
        encrypt=flag=0;
        for (j=1; j<=aln_param->ktup; ++j) {
            residue = seq_array[seq_no][i+j-1];
            if ((residue<0) || (residue>max_res_code)) {
                flag=TRUE;
                break;
            }
            encrypt += ((residue)*pot[j]);
        }
        if (flag) {
            continue;
        }
        ++encrypt;

        vn2=zzd[encrypt];

        flag=FALSE;
        while (TRUE) {
            if (!vn2) {
                flag=TRUE;
                break;
            }
            osptr=i-vn2+l2;
            if (1 != slopes[osptr]) {
                /* diagonal not flagged as significant: skip match */
                vn2=zzb[vn2];
                continue;
            }
            flen=0;
            fs=aln_param->ktup;
            next=*maxsf;

            /*
             * A-loop
             */
            while (TRUE) {
                if (!next) {
                    ++curr_frag;
                    if (curr_frag >= 2*max_aln_length) {
                        Log(&rLog, LOG_VERBOSE, "(Partial alignment)");
                        /* fragment table full: bail out with what we have */
                        goto free_and_exit; /* Yesss! Always wanted to
                                             * use a goto (AW) */
                    }
                    displ[osptr]=curr_frag;
                    put_frag(fs, i, vn2, flen, curr_frag, &next, maxsf, accum);
                } else {
                    tv1=accum[1][next];
                    tv2=accum[2][next];
                    if (frag_rel_pos(i, vn2, tv1, tv2, aln_param->ktup)) {
                        if (i-vn2 == accum[1][next]-accum[2][next]) {
                            /* same diagonal: extend, scoring only the
                             * non-overlapping part of the tuple */
                            if (i > accum[1][next]+(aln_param->ktup-1)) {
                                fs = accum[0][next]+aln_param->ktup;
                            } else {
                                rmndr = i-accum[1][next];
                                fs = accum[0][next]+rmndr;
                            }
                            flen=next;
                            next=0;
                            continue;
                        } else {
                            /* different diagonal: compare continuing on this
                             * diagonal vs. joining with a gap penalty */
                            if (0 == displ[osptr]) {
                                subt1=aln_param->ktup;
                            } else {
                                if (i > accum[1][displ[osptr]]+(aln_param->ktup-1)) {
                                    subt1=accum[0][displ[osptr]]+aln_param->ktup;
                                } else {
                                    rmndr=i-accum[1][displ[osptr]];
                                    subt1=accum[0][displ[osptr]]+rmndr;
                                }
                            }
                            subt2=accum[0][next] - aln_param->wind_gap + aln_param->ktup;
                            if (subt2>subt1) {
                                flen=next;
                                fs=subt2;
                            } else {
                                flen=displ[osptr];
                                fs=subt1;
                            }
                            next=0;
                            continue;
                        }
                    } else {
                        next=accum[4][next];
                        continue;
                    }
                }
                break;
            }
            /*
             * End of Aloop
             */
            vn2=zzb[vn2];
        }
    }

free_and_exit:

    CKFREE(displ);
    CKFREE(slopes);
    CKFREE(diag_index);

    return;
} /* end of pair_align */

/**
 *
 * Will compute ktuple scores and store in tmat
 * Following values will be set: tmat[i][j], where
 * istart <= i <iend
 * and
 * jstart <= j < jend
 * i.e.
 zero-offset
 * tmat data members have to be preallocated
 *
 * if ktuple_param_t *aln_param == NULL defaults will be used
 */
void KTuplePairDist(symmatrix_t *tmat, mseq_t *mseq,
                    int istart, int iend,
                    int jstart, int jend,
                    ktuple_param_t *param_override,
                    progress_t *prProgress,
                    unsigned long int *ulStepNo, unsigned long int ulTotalStepNo)
{
    /* this first group of variables were previously static and hence
       un-parallelisable */
    char **seq_array;
    int maxsf;
    int **accum;
    int max_aln_length; /* divide score with length of smallest sequence */
    int *zza, *zzb, *zzc, *zzd;
    int private_step_no = 0;

    int i, j, dsr;
    double calc_score;
    int max_res_code = -1;
    int max_seq_len;
    int *seqlen_array;
    /* progress_t *prProgress; */
    /* int uStepNo, uTotalStepNo; */
    ktuple_param_t aln_param = default_protein_param;
    bool bPrintCR = (rLog.iLogLevelEnabled<=LOG_VERBOSE) ? FALSE : TRUE;

    /* create a local progress logger if the caller did not supply one */
    if(prProgress == NULL) {
        NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO),
                    "Ktuple-distance calculation progress", bPrintCR);
    }

    /* conversion to old style data types follows
     * (seqlen_array is unit-offset like the rest of the legacy code) */
    seqlen_array = (int*) CKMALLOC((mseq->nseqs+1) * sizeof(int));
    for (i=0; i<mseq->nseqs; i++) {
        seqlen_array[i+1] = mseq->sqinfo[i].len;
    }

    /* setup alignment parameters; max_res_code excludes the trailing
       gap symbol and the wildcard at the end of the alphabet strings */
    if (SEQTYPE_PROTEIN == mseq->seqtype) {
        DNAFLAG = FALSE;
        max_res_code = strlen(AMINO_ACID_CODES)-2;
        aln_param = default_protein_param;
    } else if (SEQTYPE_RNA == mseq->seqtype || SEQTYPE_DNA == mseq->seqtype) {
        DNAFLAG = TRUE;
        max_res_code = strlen(NUCLEIC_ACID_CODES)-2;
        aln_param = default_dna_param;
    } else {
        Log(&rLog, LOG_FATAL, "Internal error in %s: Unknown sequence type.", __FUNCTION__);
    }

    if (NULL!=param_override) {
        aln_param.ktup = param_override->ktup;
        aln_param.wind_gap = param_override->wind_gap;
        aln_param.signif = param_override->signif;
        aln_param.window = param_override->window;
    }

    /*LOG_DEBUG("DNAFLAG = %d max_res_code = %d", DNAFLAG, max_res_code);*/

    /* convert mseq to clustal's old-style int encoded sequences (unit-offset) */
    max_aln_length = 0;
    max_seq_len = 0;
    seq_array = (char **) CKMALLOC((mseq->nseqs+1) * sizeof(char *));
    seq_array[0] = NULL;
    /* FIXME check that non of the seqs is smaller than ktup (?).
     * Otherwise segfault occurs */
    for (i=0; i<mseq->nseqs; i++) {
        seq_array[i+1] = (char *) CKMALLOC((seqlen_array[i+1]+2) * sizeof (char));;
    }
    for (i=0; i<mseq->nseqs; i++) {
        /*LOG_DEBUG("calling encode with seq_array[%d+1] len=%d and seq=%s",
          i, seqlen_array[i+1], mseq->seq[i]);*/
        /* &(mseq->seq[i][-1]) points one element before the string so that
         * encode's unit-offset indexing (seq[1..l]) hits the real data */
        if (TRUE == DNAFLAG) {
            encode(&(mseq->seq[i][-1]), seq_array[i+1],
                   seqlen_array[i+1], NUCLEIC_ACID_CODES);
        } else {
            encode(&(mseq->seq[i][-1]), seq_array[i+1],
                   seqlen_array[i+1], AMINO_ACID_CODES);
        }
        if (seqlen_array[i+1]>max_seq_len) {
            max_seq_len = seqlen_array[i+1];
        }
    }
    max_aln_length = max_seq_len * 2; /* see sequence.c in old source */

    /* FIXME: short sequences can cause seg-fault
     * because max_aln_length can get shorter
     * than (max_res_code+1)^k
     * FS, r222->r223 */
    max_aln_length = max_aln_length > pow((max_res_code+1), aln_param.ktup)+1 ?
        max_aln_length : pow((max_res_code+1), aln_param.ktup)+1;

    /*
     * conversion to old style clustal done (in no time)
     */

    /* accum: fragment table shared across pair_align calls, see put_frag */
    accum = (int **) CKCALLOC(5, sizeof (int *));
    for (i=0;i<5;i++) {
        accum[i] = (int *) CKCALLOC((2*max_aln_length+1), sizeof(int));
    }

    /* per-sequence hash tables (tptr/pl pairs, see make_ptrs) */
    zza = (int *) CKCALLOC( (max_aln_length+1), sizeof(int));
    zzb = (int *) CKCALLOC( (max_aln_length+1), sizeof(int));
    zzc = (int *) CKCALLOC( (max_aln_length+1), sizeof(int));
    zzd = (int *) CKCALLOC( (max_aln_length+1), sizeof(int));

    /* estimation of total number of steps (if istart and jstart are
     * both 0) (now handled in the calling routine) */
    /* uTotalStepNo = iend*jend - iend*iend/2 + iend/2; uStepNo = 0; */
    /*LOG_DEBUG("istart=%d iend=%d jstart=%d jend=%d", istart, iend, jstart, jend);*/

    for (i=istart+1; i<=iend; ++i) {
        /* by definition a sequence compared to itself should give a score of
           0. AW */
        SymMatrixSetValue(tmat, i-1, i-1, 0.0);
        make_ptrs(zza, zzc, i, seqlen_array[i],
                  aln_param.ktup, max_res_code, seq_array);
#ifdef HAVE_OPENMP
#pragma omp critical(ktuple)
#endif
        {
            ProgressLog(prProgress, *ulStepNo, ulTotalStepNo, FALSE);
        }

        for (j=MAX(i+1, jstart+1); j<=jend; ++j) {
            (*ulStepNo)++;
            private_step_no++;
            /*LOG_DEBUG("comparing pair %d:%d", i, j);*/

            make_ptrs(zzb, zzd, j, seqlen_array[j],
                      aln_param.ktup, max_res_code, seq_array);
            pair_align(i, seqlen_array[i], seqlen_array[j], max_res_code,
                       &aln_param, seq_array, &maxsf, accum, max_aln_length,
                       zza, zzb, zzc, zzd);
            if (!maxsf) {
                calc_score=0.0;
            } else {
                calc_score=(double)accum[0][maxsf];
                if (percent) {
                    /* normalise by the shorter sequence */
                    dsr=(seqlen_array[i]<seqlen_array[j]) ?
                        seqlen_array[i] : seqlen_array[j];
                    calc_score = (calc_score/(double)dsr) * 100.0;
                }
            }

            /* printf("%d %d %d\n", i-1, j-1, (100.0 - calc_score)/100.0); */
            /* store as a distance: 1.0 - similarity fraction */
            SymMatrixSetValue(tmat, i-1, j-1, (100.0 - calc_score)/100.0);

            /* the function allows you not to compute the full matrix.
             * here we explicitely make the resulting matrix a
             * rectangle, i.e. we always set full rows. in other
             * words, if we don't complete the full matrix then we
             * don't have a full symmetry. so only use the defined
             * symmetric part. AW */
            /*LOG_DEBUG("setting %d : %d = %f", j, i, tmat[i][j]);*/
            /* not needed anymore since we use symmatrix_t
               if (j<=iend) {
               tmat[j][i] = tmat[i][j];
               }
            */
#ifdef HAVE_OPENMP
#pragma omp critical(ktuple)
#endif
            {
                Log(&rLog, LOG_DEBUG,
                    "K-tuple distance for sequence pair %d:%d = %lg",
                    i, j, SymMatrixGetValue(tmat, i-1, j-1));
            }
        }
    }
    /*
      Log(&rLog, LOG_FORCED_DEBUG, "uTotalStepNo=%d for istart=%d iend=%d jstart=%d jend=%d", uStepNo, istart, iend, jstart, jend);
      Log(&rLog, LOG_FORCED_DEBUG, "Fabian = %d", iend*jend - iend*iend/2 + iend/2);
    */
    /* printf("\n\n%d\t%d\t%d\t%d\n\n", omp_get_thread_num(), uStepNo, istart, iend); */

    for (i=0;i<5;i++) {
        CKFREE(accum[i]);
    }
    CKFREE(accum);
#ifdef HAVE_OPENMP
#pragma omp critical(ktuple)
#if 0
    {
        printf("steps: %d\n", private_step_no);
    }
#endif
#endif
    CKFREE(zza);
    CKFREE(zzb);
    CKFREE(zzc);
    CKFREE(zzd);
    free(seqlen_array);
    for (i=1; i<=mseq->nseqs; i++) {
        CKFREE(seq_array[i]);
    }
    CKFREE(seq_array);
} /* end of KTuplePairDist */
GB_binop__ge_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// This instance implements the GE (>=) operator specialised for uint16_t
// inputs with a bool result; each function body is a shared template
// parameterised by the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__ge_uint16
// A.*B function (eWiseMult):       GB_AemultB__ge_uint16
// A*D function (colscale):         GB_AxD__ge_uint16
// D*A function (rowscale):         GB_DxB__ge_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__ge_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__ge_uint16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__ge_uint16
// C=scalar+B                       GB_bind1st__ge_uint16
// C=scalar+B'                      GB_bind1st_tran__ge_uint16
// C=A+scalar                       GB_bind2nd__ge_uint16
// C=A'+scalar                      GB_bind2nd_tran__ge_uint16

// C type:   bool
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x >= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GE || GxB_NO_UINT16 || GxB_NO_GE_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// not applicable for a comparison operator, hence compiled out
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__ge_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__ge_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for this operator (C and B differ in type)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__ge_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for this operator (C and b differ in type)
    #if 0
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__ge_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__ge_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__ge_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces, allocated by the template and freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ge_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ge_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Bb is the bitmap of present entries; skip absent ones
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ge_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint16_t aij = Ax [pA] ;        \
    Cx [pC] = (x >= aij) ;          \
}

GrB_Info GB_bind1st_tran__ge_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint16_t aij = Ax [pA] ;        \
    Cx [pC] = (aij >= y) ;          \
}

GrB_Info GB_bind2nd_tran__ge_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dynamic_module_load.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -DSHARED -shared -o %t.so && %clang %flags %s -o %t-aarch64-unknown-linux-gnu -ldl && %libomptarget-run-aarch64-unknown-linux-gnu %t.so 2>&1 | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -DSHARED -shared -o %t.so && %clang %flags %s -o %t-powerpc64-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -DSHARED -shared -o %t.so && %clang %flags %s -o %t-powerpc64le-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64le-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DSHARED -shared -o %t.so && %clang %flags %s -o %t-x86_64-pc-linux-gnu -ldl && %libomptarget-run-x86_64-pc-linux-gnu %t.so 2>&1 | %fcheck-x86_64-pc-linux-gnu

#ifdef SHARED
// The shared-library half of the test: a function containing an offload
// region, loaded at runtime by the host half below.
#include <stdio.h>
int foo() {
#pragma omp target
  ;
  printf("%s\n", "DONE.");
  return 0;
}
#else
// The host half: dlopen the plugin built from the SHARED branch and call
// its `foo` to verify offloading works from a dynamically loaded module.
#include <dlfcn.h>
#include <stdio.h>
int main(int argc, char **argv) {
  void *Handle = dlopen(argv[1], RTLD_NOW);
  int (*Foo)(void);

  if (Handle == NULL) {
    printf("dlopen() failed: %s\n", dlerror());
    return 1;
  }
  Foo = (int (*)(void)) dlsym(Handle, "foo");
  // BUG FIX: this previously re-tested `Handle == NULL` (always false here),
  // so a failed dlsym() fell through and called a NULL function pointer.
  if (Foo == NULL) {
    printf("dlsym() failed: %s\n", dlerror());
    return 1;
  }
  // CHECK: DONE.
  // CHECK-NOT: {{abort|fault}}
  return Foo();
}
#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
force.c
#include <stdio.h> #include <string.h> #include <stdint.h> #include <omp.h> #include <math.h> #include <ktime.h> #include <geometry.h> #ifdef __USE_HW_COUNTER #include <perf.h> #include <kperf.h> #endif #include <phy.h> /* Calculates the forces (Drag FORCE, LIFT FORCE, and the momentum) */ void compute_force(struct force *restrict f) { #ifdef __USE_HW_COUNTER const struct fd fd = f->perf_counters->fd; struct counters start; perf_read(fd, &start); const uint64_t icycle = __rdtsc(); #endif struct ktime ktime; setktime(&ktime); const struct geometry *restrict g = f->g; const struct ivals * iv = f->iv; const double *restrict q = f->q; double lift = 0.f; double drag = 0.f; double momn = 0.f; const uint32_t snfc = g->s->snfc; const uint32_t *restrict snfic = g->s->snfic; uint32_t i; for(i = 0; i < snfc; i++) { uint32_t if0 = snfic[i]; uint32_t if1 = snfic[i+1]; uint32_t j; #pragma omp parallel for reduction(+: lift, drag, momn) for(j = if0; j < if1; j++) { uint32_t n0 = g->b->snfptr->n0[j]; uint32_t n1 = g->b->snfptr->n1[j]; uint32_t n2 = g->b->snfptr->n2[j]; double x0 = g->n->xyz->x0[n0]; double y0 = g->n->xyz->x1[n0]; double z0 = g->n->xyz->x2[n0]; double x1 = g->n->xyz->x0[n1]; double y1 = g->n->xyz->x1[n1]; double z1 = g->n->xyz->x2[n1]; double x2 = g->n->xyz->x0[n2]; double y2 = g->n->xyz->x1[n2]; double z2 = g->n->xyz->x2[n2]; /* Delta coordinates in all directions */ double ax = x1 - x0; double ay = y1 - y0; double az = z1 - z0; double bx = x2 - x0; double by = y2 - y0; double bz = z2 - z0; /* Norm points outward, away from grid interior. Norm magnitude is area of surface triangle. 
*/ double xnorm = ay * bz; xnorm -= az * by; xnorm = -0.5f * xnorm; double ynorm = ax * bz; ynorm -= az * bx; ynorm = 0.5f * ynorm; /* Pressure values store at every face node */ double p0 = q[g->c->bsz * n0]; double p1 = q[g->c->bsz * n1]; double p2 = q[g->c->bsz * n2]; double press = (p0 + p1 + p2) / 3.f; double cp = 2.f * (press - 1.f); double dcx = cp * xnorm; double dcy = cp * ynorm; double xmid = x0 + x1 + x2; double ymid = y0 + y1 + y2; lift = lift - dcx * iv->v + dcy * iv->u; drag = drag + dcx * iv->u + dcy * iv->v; momn = momn + (xmid - 0.25f) * dcy - ymid * dcx; } } (* f->clift) = lift; (* f->cdrag) = drag; (* f->cmomn) = momn; compute_time(&ktime, &f->t->forces); #ifdef __USE_HW_COUNTER const uint64_t cycle = __rdtsc() - icycle; struct counters end; perf_read(fd, &end); struct tot tot; perf_calc(start, end, &tot); f->perf_counters->ctrs->forces.cycles += cycle; f->perf_counters->ctrs->forces.tot.imcR += tot.imcR; f->perf_counters->ctrs->forces.tot.imcW += tot.imcW; f->perf_counters->ctrs->forces.tot.edcR += tot.edcR; f->perf_counters->ctrs->forces.tot.edcW += tot.edcW; #endif }
rawSHA1_ng_fmt_plug.c
// // Alternative SSE2 optimised raw SHA-1 implementation for John The Ripper. // // This plugin requires -msse4 in CFLAGS. // // Copyright (C) 2012 Tavis Ormandy <taviso@cmpxchg8b.com> // Copyright (c) 2015 magnum (AVX2/AVX512 support) // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Library General Public // License as published by the Free Software Foundation; either // version 2 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Library General Public License for more details. // // You should have received a copy of the GNU Library General Public // License along with this library; if not, write to the // Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, // Boston, MA 02110-1301, USA. // #include "arch.h" #if defined(SIMD_COEF_32) && (SIMD_COEF_32 < 16 || ARCH_BITS >= 64) && !_MSC_VER && !__ARM_NEON #if FMT_EXTERNS_H extern struct fmt_main fmt_sha1_ng; #elif FMT_REGISTERS_H john_register_one(&fmt_sha1_ng); #else #include "misc.h" #if !defined(DEBUG) && !defined(WITH_ASAN) // These compilers claim to be __GNUC__ but warn on gcc pragmas. 
#if __GNUC__ && !__INTEL_COMPILER && !__clang__ && !__llvm__ && !_MSC_VER
#pragma GCC optimize 3
#pragma GCC optimize "-fprefetch-loop-arrays"
#endif
#endif /* !DEBUG && !WITH_ASAN */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif

#include <string.h>

#if !FAST_FORMATS_OMP
#undef _OPENMP
#elif _OPENMP
#include <omp.h>
#endif

#include "stdbool.h"
#include "stdint.h"
#if SIMD_COEF_32 > 8
#include "int128.h"
#endif
#include "pseudo_intrinsics.h"
#include "stdint.h"
#include "params.h"
#include "formats.h"
#include "memory.h"
#include "sha.h"
#include "johnswap.h"
#include "aligned.h"
#include "rawSHA1_common.h"
#include "memdbg.h"

#define VWIDTH SIMD_COEF_32

#define SHA1_BLOCK_SIZE         64
#define SHA1_BLOCK_WORDS        16
#define SHA1_DIGEST_SIZE        20
#define SHA1_DIGEST_WORDS        5
#define SHA1_PARALLEL_HASH      512 // This must be a multiple of max VWIDTH.

#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE               128
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE               2048 // Multiplier to hide OMP overhead
#endif
#endif

// Message-schedule expansion: X0 = rotl1(X0 ^ X2 ^ X8 ^ X13), i.e. the
// standard SHA-1 W[t] = rotl1(W[t-16]^W[t-14]^W[t-8]^W[t-3]) with the
// parameters named by their offsets into the 16-word ring buffer.
// Classic multi-statement macro wrapped in do/while; arguments must be
// plain vector variables (each is evaluated more than once).
#define X(X0, X2, X8, X13) do {                 \
        X0 = vxor(X0, X8);                      \
        X0 = vxor(X0, X13);                     \
        X0 = vxor(X0, X2);                      \
        X0 = vroti_epi32(X0, 1);                \
    } while (false)

// R1: rounds 0-19, F = Ch(B,C,D) via vcmov.  K is a free variable set by
// the caller (crypt_all) before each group of 20 rounds.
#define R1(W, A, B, C, D, E) do {               \
        E = vadd_epi32(E, K);                   \
        E = vadd_epi32(E, vcmov(C, D, B));      \
        E = vadd_epi32(E, W);                   \
        B = vroti_epi32(B, 30);                 \
        E = vadd_epi32(E, vroti_epi32(A, 5));   \
    } while (false)

// R2: rounds 20-39 and 60-79, F = parity (B ^ C ^ D).
#define R2(W, A, B, C, D, E) do {               \
        E = vadd_epi32(E, K);                   \
        E = vadd_epi32(E, vxor(vxor(B, C), D)); \
        E = vadd_epi32(E, W);                   \
        B = vroti_epi32(B, 30);                 \
        E = vadd_epi32(E, vroti_epi32(A, 5));   \
    } while (false)

// R4: same as R2 but omits the B rotation -- used only for the final
// computed round (75), where the rotated B would never be consumed.
#define R4(W, A, B, C, D, E) do {               \
        E = vadd_epi32(E, K);                   \
        E = vadd_epi32(E, vxor(vxor(B, C), D)); \
        E = vadd_epi32(E, W);                   \
        E = vadd_epi32(E, vroti_epi32(A, 5));   \
    } while (false)

// R3: rounds 40-59, F = Maj(B,C,D).  Two formulations: a cmov-based one
// when the target has a real vector conditional move, and a boolean
// and/or fallback when vcmov is emulated.
#if !VCMOV_EMULATED
#define R3(W, A, B, C, D, E) do {                       \
        E = vadd_epi32(E, K);                           \
        E = vadd_epi32(E, vcmov(D, B, vxor(C, B)));     \
        E = vadd_epi32(E, W);                           \
        B = vroti_epi32(B, 30);                         \
        E = vadd_epi32(E, vroti_epi32(A, 5));           \
    } while (false)
#else
#define R3(W, A, B, C, D, E) do {                                       \
        E = vadd_epi32(E, K);                                           \
        E = vadd_epi32(E, vor(vand(D, B), vand(vor(D, B), C)));         \
        E = vadd_epi32(E, W);                                           \
        B = vroti_epi32(B, 30);                                         \
        E = vadd_epi32(E, vroti_epi32(A, 5));                           \
    } while (false)
#endif

#if SIMD_COEF_32 == 4
// Not used for AVX2 and better, which has gather instructions.
// 4x4 32-bit in-register transpose (the usual unpacklo/unpackhi ladder).
#define _MM_TRANSPOSE4_EPI32(R0, R1, R2, R3) do {\
    vtype T0, T1, T2, T3;                       \
    T0 = vunpacklo_epi32(R0, R1);               \
    T1 = vunpacklo_epi32(R2, R3);               \
    T2 = vunpackhi_epi32(R0, R1);               \
    T3 = vunpackhi_epi32(R2, R3);               \
    R0 = vunpacklo_epi64(T0, T1);               \
    R1 = vunpackhi_epi64(T0, T1);               \
    R2 = vunpacklo_epi64(T2, T3);               \
    R3 = vunpackhi_epi64(T2, T3);               \
} while (false)
#endif

// M and N contain the first and last 128bits of a 512bit SHA-1 message block
// respectively. The remaining 256bits are always zero, and so are not stored
// here to avoid the load overhead.
// For AVX2, we have half a block and for AVX512/MIC we actually have a full
// block.
// NOTE(review): N is in fact filled with the per-slot key *length* in
// sha1_fmt_set_key (N[index] = len) and consumed as such by crypt_all
// (W[15] = len << 3); the comment above appears to predate that change.
static uint32_t (*M)[VWIDTH];
static uint32_t *N;

// MD contains the state of the SHA-1 A register at R75 for each of the input
// messages.
static uint32_t *MD;

/* unused
static inline uint32_t __attribute__((const)) rotateright(uint32_t value, uint8_t count)
{
    register uint32_t result;

    asm("ror     %%cl, %0"
        : "=r" (result)
        : "0"  (value),
          "c"  (count));

    return result;
}
*/

// Portable rotate-left helper: MinGW intrinsic, x86 inline asm, or the
// generic shift/or fallback (caller guarantees count <= 32).
static inline uint32_t __attribute__((const)) rotateleft(uint32_t value, uint8_t count)
{
    register uint32_t result;
#if (__MINGW32__ || __MINGW64__) && __STRICT_ANSI__
    result = _rotl(value, count); //((value<<count)|((ARCH_WORD_32)value>>(32-count)));
#elif __i386__ || __x86_64__
    asm("rol     %%cl, %0"
        : "=r" (result)
        : "0"  (value),
          "c"  (count));
#else
    // assume count <= 32
    result = (value << count) | (value >> (32 - count));
#endif
    return result;
}

// GCC < 4.3 does not have __builtin_bswap32(), provide an alternative.
#if !__INTEL_COMPILER && GCC_VERSION < 40300 #define __builtin_bswap32 bswap32 static inline uint32_t __attribute__((const)) bswap32(uint32_t value) { register uint32_t result; #if (__MINGW32__ || __MINGW64__) && __STRICT_ANSI__ result = _byteswap_ulong(value); #elif __i386 || __x86_64__ asm("bswap %0" : "=r" (result) : "0" (value)); #else result = (value << 24) | ((value << 8) & 0xFF0000) | (value >> 24) | ((value >> 8) & 0xFF00); #endif return result; } #endif static void sha1_fmt_init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif M = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*M), MEM_ALIGN_CACHE); N = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*N), MEM_ALIGN_CACHE); MD = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*MD), MEM_ALIGN_CACHE); } static void done(void) { MEM_FREE(MD); MEM_FREE(N); MEM_FREE(M); } static void *sha1_fmt_binary(char *ciphertext) { // Static buffer storing the binary representation of ciphertext. static union { uint32_t w[SHA1_DIGEST_WORDS]; vtype v; } result; uint32_t a75; // Convert ascii representation into binary. memcpy(result.w, rawsha1_common_get_binary(ciphertext), 20); // One preprocessing step, if we calculate E80 rol 2 here, we // can compare it against A75 and save 5 rounds in crypt_all(). a75 = rotateleft(__builtin_bswap32(result.w[4]) - 0xC3D2E1F0, 2); // Fill the vector with it, so we can do a vectorized compare result.v = vset1_epi32(a75); return result.w; } // This function is called when John wants us to buffer a crypt() operation // on the specified key. We also preprocess it for SHA-1 as we load it. // // This implementation is hardcoded to only accept passwords under 15 // characters. This is because we can create a new message block in just two // MOVDQA instructions (we need 15 instead of 16 because we must append a bit // to the message). 
For AVX2 it's 31 characters and for AVX-512+ it's 125. // // This routine assumes that key is not on an unmapped page boundary, but // doesn't require it to be 16 byte aligned (although that would be nice). static void sha1_fmt_set_key(char *key, int index) { vtype Z = vsetzero(); vtype X = vloadu(key); vtype B; // First, find the length of the key by scanning for a zero byte. #if (__AVX512F__ && !__AVX512BW__) || __MIC__ || __ALTIVEC__ || __ARM_NEON uint32_t len = strlen(key); #else // FIXME: even uint64_t won't be long enough for AVX-1024 uint64_t mask = vcmpeq_epi8_mask(X, Z); uint32_t len = __builtin_ctzl(mask); #endif // Create a lookup tables to find correct masks for each supported input // length. It would be nice if we could use bit shifts to produce these // dynamically, but they require an immediate operand. #if VWIDTH > 8 // FIXME: a problem with using int128 here is it won't work at // all for 32-bit builds - but that may be academic. #define XX ((((uint128_t)0xFFFFFFFFFFFFFFFFULL)<<64) + 0xFFFFFFFFFFFFFFFFULL) #define YY ((uint128_t)0x80) #define ZZ ((uint128_t)0x0) static const JTR_ALIGN(MEM_ALIGN_SIMD) uint128_t kTrailingBitTable[][4] = { {YY<< 0, ZZ, ZZ, ZZ}, {YY<< 8, ZZ, ZZ, ZZ}, {YY<< 16, ZZ, ZZ, ZZ}, {YY<< 24, ZZ, ZZ, ZZ}, {YY<< 32, ZZ, ZZ, ZZ}, {YY<< 40, ZZ, ZZ, ZZ}, {YY<< 48, ZZ, ZZ, ZZ}, {YY<< 56, ZZ, ZZ, ZZ}, {YY<< 64, ZZ, ZZ, ZZ}, {YY<< 72, ZZ, ZZ, ZZ}, {YY<< 80, ZZ, ZZ, ZZ}, {YY<< 88, ZZ, ZZ, ZZ}, {YY<< 96, ZZ, ZZ, ZZ}, {YY<<104, ZZ, ZZ, ZZ}, {YY<<112, ZZ, ZZ, ZZ}, {YY<<120, ZZ, ZZ, ZZ}, {ZZ, YY<< 0, ZZ, ZZ}, {ZZ, YY<< 8, ZZ, ZZ}, {ZZ, YY<< 16, ZZ, ZZ}, {ZZ, YY<< 24, ZZ, ZZ}, {ZZ, YY<< 32, ZZ, ZZ}, {ZZ, YY<< 40, ZZ, ZZ}, {ZZ, YY<< 48, ZZ, ZZ}, {ZZ, YY<< 56, ZZ, ZZ}, {ZZ, YY<< 64, ZZ, ZZ}, {ZZ, YY<< 72, ZZ, ZZ}, {ZZ, YY<< 80, ZZ, ZZ}, {ZZ, YY<< 88, ZZ, ZZ}, {ZZ, YY<< 96, ZZ, ZZ}, {ZZ, YY<<104, ZZ, ZZ}, {ZZ, YY<<112, ZZ, ZZ}, {ZZ, YY<<120, ZZ, ZZ}, {ZZ, ZZ, YY<< 0, ZZ}, {ZZ, ZZ, YY<< 8, ZZ}, {ZZ, ZZ, YY<< 16, ZZ}, {ZZ, ZZ, YY<< 24, ZZ}, {ZZ, 
ZZ, YY<< 32, ZZ}, {ZZ, ZZ, YY<< 40, ZZ}, {ZZ, ZZ, YY<< 48, ZZ}, {ZZ, ZZ, YY<< 56, ZZ}, {ZZ, ZZ, YY<< 64, ZZ}, {ZZ, ZZ, YY<< 72, ZZ}, {ZZ, ZZ, YY<< 80, ZZ}, {ZZ, ZZ, YY<< 88, ZZ}, {ZZ, ZZ, YY<< 96, ZZ}, {ZZ, ZZ, YY<<104, ZZ}, {ZZ, ZZ, YY<<112, ZZ}, {ZZ, ZZ, YY<<120, ZZ}, {ZZ, ZZ, ZZ, YY<< 0}, {ZZ, ZZ, ZZ, YY<< 8}, {ZZ, ZZ, ZZ, YY<< 16}, {ZZ, ZZ, ZZ, YY<< 24}, {ZZ, ZZ, ZZ, YY<< 32}, {ZZ, ZZ, ZZ, YY<< 40}, {ZZ, ZZ, ZZ, YY<< 48}, {ZZ, ZZ, ZZ, YY<< 56}, {ZZ, ZZ, ZZ, YY<< 64}, {ZZ, ZZ, ZZ, YY<< 72}, {ZZ, ZZ, ZZ, YY<< 80}, {ZZ, ZZ, ZZ, YY<< 88}, {ZZ, ZZ, ZZ, YY<< 96}, {ZZ, ZZ, ZZ, YY<<104}, {ZZ, ZZ, ZZ, YY<<112}, {ZZ, ZZ, ZZ, YY<<120} }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint128_t kUsedBytesTable[][4] = { {XX<< 0, XX, XX, XX}, {XX<< 8, XX, XX, XX}, {XX<< 16, XX, XX, XX}, {XX<< 24, XX, XX, XX}, {XX<< 32, XX, XX, XX}, {XX<< 40, XX, XX, XX}, {XX<< 48, XX, XX, XX}, {XX<< 56, XX, XX, XX}, {XX<< 64, XX, XX, XX}, {XX<< 72, XX, XX, XX}, {XX<< 80, XX, XX, XX}, {XX<< 88, XX, XX, XX}, {XX<< 96, XX, XX, XX}, {XX<<104, XX, XX, XX}, {XX<<112, XX, XX, XX}, {XX<<120, XX, XX, XX}, {ZZ, XX<< 0, XX, XX}, {ZZ, XX<< 8, XX, XX}, {ZZ, XX<< 16, XX, XX}, {ZZ, XX<< 24, XX, XX}, {ZZ, XX<< 32, XX, XX}, {ZZ, XX<< 40, XX, XX}, {ZZ, XX<< 48, XX, XX}, {ZZ, XX<< 56, XX, XX}, {ZZ, XX<< 64, XX, XX}, {ZZ, XX<< 72, XX, XX}, {ZZ, XX<< 80, XX, XX}, {ZZ, XX<< 88, XX, XX}, {ZZ, XX<< 96, XX, XX}, {ZZ, XX<<104, XX, XX}, {ZZ, XX<<112, XX, XX}, {ZZ, XX<<120, XX, XX}, {ZZ, ZZ, XX<< 0, XX}, {ZZ, ZZ, XX<< 8, XX}, {ZZ, ZZ, XX<< 16, XX}, {ZZ, ZZ, XX<< 24, XX}, {ZZ, ZZ, XX<< 32, XX}, {ZZ, ZZ, XX<< 40, XX}, {ZZ, ZZ, XX<< 48, XX}, {ZZ, ZZ, XX<< 56, XX}, {ZZ, ZZ, XX<< 64, XX}, {ZZ, ZZ, XX<< 72, XX}, {ZZ, ZZ, XX<< 80, XX}, {ZZ, ZZ, XX<< 88, XX}, {ZZ, ZZ, XX<< 96, XX}, {ZZ, ZZ, XX<<104, XX}, {ZZ, ZZ, XX<<112, XX}, {ZZ, ZZ, XX<<120, XX}, {ZZ, ZZ, ZZ, XX<< 0}, {ZZ, ZZ, ZZ, XX<< 8}, {ZZ, ZZ, ZZ, XX<< 16}, {ZZ, ZZ, ZZ, XX<< 24}, {ZZ, ZZ, ZZ, XX<< 32}, {ZZ, ZZ, ZZ, XX<< 40}, {ZZ, ZZ, ZZ, XX<< 48}, {ZZ, ZZ, ZZ, XX<< 56}, {ZZ, 
ZZ, ZZ, XX<< 64}, {ZZ, ZZ, ZZ, XX<< 72}, {ZZ, ZZ, ZZ, XX<< 80}, {ZZ, ZZ, ZZ, XX<< 88}, {ZZ, ZZ, ZZ, XX<< 96}, {ZZ, ZZ, ZZ, XX<<104}, {ZZ, ZZ, ZZ, XX<<112}, {ZZ, ZZ, ZZ, XX<<120} }; #elif VWIDTH > 4 static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kTrailingBitTable[][8] = { { 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kUsedBytesTable[][8] = { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000 }, }; #else static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kTrailingBitTable[][4] = { { 0x00000080, 0x00000000, 0x00000000, 0x00000000 }, { 0x00008000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00800000, 0x00000000, 0x00000000, 0x00000000 }, { 0x80000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000080, 0x00000000, 0x00000000 }, { 0x00000000, 0x00008000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00800000, 0x00000000, 0x00000000 }, { 0x00000000, 0x80000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000080, 0x00000000 }, { 0x00000000, 0x00000000, 0x00008000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00800000, 0x00000000 }, { 0x00000000, 0x00000000, 0x80000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000080 }, { 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kUsedBytesTable[][4] = { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF 
}, { 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF },
        { 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF },
        { 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF },
        { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF },
        { 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF },
        { 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF },
        { 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF },
        { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF },
        { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00 },
        { 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000 },
        { 0x00000000, 0x00000000, 0x00000000, 0xFF000000 },
    };
#endif

    // Record the key length for this slot; crypt_all() turns it into the
    // SHA-1 message bit count (len << 3).
    N[index] = len;

    // Zero out the rest of the DQWORD in X by making a suitable mask.
    Z = vload(kUsedBytesTable[len]);

    // Find the correct position for the trailing bit required by SHA-1.
    B = vload(kTrailingBitTable[len]);

    // Now we have this:
    // B = 00 00 00 00 00 80 00 00 00 00 00 00 00 00 00
    // Z = 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff
    // X = 41 41 41 41 41 00 12 34 56 78 12 34 56 78 9A
    //     <--------------> <------------------------>
    //      key bytes w/nul       junk from stack.

    // Use PANDN to apply the mask, then POR to append the trailing bit
    // required by SHA-1, which leaves us with this:
    // X = 41 41 41 41 41 80 00 00 00 00 00 00 00 00 00
    X = vor(vandnot(Z, X), B);

    // SHA-1 requires us to byte swap all the 32bit words in the message, which
    // we do here.
    //  X = 40 41 42 44 45 80 00 00 00 00 00 00 00 00 00    // What we have.
    //  X = 44 42 41 40 00 00 80 45 00 00 00 00 00 00 00    // What we want.
    vswap32(X);

    // Store the result into the message buffer.
    vstore(&M[index], X);

    return;
}

// Recover the plaintext for 'index' from the preprocessed message buffer.
// Returns a pointer to a static buffer (standard JtR get_key() contract).
static char *sha1_fmt_get_key(int index)
{
    static uint32_t key[VWIDTH + 1];
    int i;

    // This function is not hot, we can do this slowly. First, restore
    // endianness.
    for (i = 0; i < SIMD_COEF_32; i++)
        key[i] = __builtin_bswap32(M[index][i]);

    // Skip backwards until we hit the trailing bit, then remove it.
    memset(strrchr((char*)(key), 0x80), 0x00, 1);

    return (char*) key;
}

// Hash all buffered keys.  For each group of VWIDTH keys, runs the SHA-1
// compression function (rounds 0..75 only -- see the A75 trick below) and
// stores the resulting A register in MD[].
static int sha1_fmt_crypt_all(int *pcount, struct db_salt *salt)
{
    uint32_t i;

    // Fetch crypt count from john.
    // (NOTE: 'i < count' below compares uint32_t against int32_t; fine for
    // the positive counts John supplies.)
    const int32_t count = *pcount;

    // To reduce the overhead of multiple function calls, we buffer lots of
    // passwords, and then hash them in multiples of VWIDTH all at once.
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < count; i += VWIDTH) {
        vtype W[SHA1_BLOCK_WORDS];
        vtype A, B, C, D, E;
        vtype K;

#if __AVX512F__ || __MIC__
        const vtype indices = vset_epi32(15<<4,14<<4,13<<4,12<<4,
                                         11<<4,10<<4, 9<<4, 8<<4,
                                          7<<4, 6<<4, 5<<4, 4<<4,
                                          3<<4, 2<<4, 1<<4, 0<<4);
#elif __AVX2__
        const vtype indices = vset_epi32( 7<<3, 6<<3, 5<<3, 4<<3,
                                          3<<3, 2<<3, 1<<3, 0<<3);
#endif

#if __AVX2__ || __MIC__
        // Gather the message right into place.
        uint32_t j;
        for (j = 0; j < VWIDTH; ++j)
            W[j] = vgather_epi32(&M[i][j], indices, sizeof(uint32_t));
#else
        // AVX has no gather instructions, so load and transpose.
        W[0] = vload(&M[i + 0]);
        W[1] = vload(&M[i + 1]);
        W[2] = vload(&M[i + 2]);
        W[3] = vload(&M[i + 3]);

        _MM_TRANSPOSE4_EPI32(W[0], W[1], W[2], W[3]);
#endif

        // SHA-1 initial state (FIPS 180 H0..H4) and K for rounds 0-19.
        A = vset1_epi32(0x67452301);
        B = vset1_epi32(0xEFCDAB89);
        C = vset1_epi32(0x98BADCFE);
        D = vset1_epi32(0x10325476);
        E = vset1_epi32(0xC3D2E1F0);
        K = vset1_epi32(0x5A827999);

        R1(W[0],  A, B, C, D, E);
        R1(W[1],  E, A, B, C, D);
        R1(W[2],  D, E, A, B, C);
#if VWIDTH > 4
        R1(W[3],  C, D, E, A, B);
        R1(W[4],  B, C, D, E, A);
        R1(W[5],  A, B, C, D, E);                       // 5
        R1(W[6],  E, A, B, C, D);
#else
        // Narrow vectors only carry the first 16 key bytes; the rest of the
        // block is zero and is materialized lazily here.
        R1(W[3],  C, D, E, A, B); W[4]  = vsetzero();
        R1(W[4],  B, C, D, E, A); W[5]  = vsetzero();
        R1(W[5],  A, B, C, D, E); W[6]  = vsetzero();   // 5
        R1(W[6],  E, A, B, C, D); W[7]  = vsetzero();
#endif
#if VWIDTH > 8
        R1(W[7],  D, E, A, B, C);
        R1(W[8],  C, D, E, A, B);
        R1(W[9],  B, C, D, E, A);
        R1(W[10], A, B, C, D, E);                       // 10
        R1(W[11], E, A, B, C, D);
        R1(W[12], D, E, A, B, C);
        R1(W[13], C, D, E, A, B);
        R1(W[14], B, C, D, E, A);
#else
        R1(W[7],  D, E, A, B, C); W[8]  = vsetzero();
        R1(W[8],  C, D, E, A, B); W[9]  = vsetzero();
        R1(W[9],  B, C, D, E, A); W[10] = vsetzero();
        R1(W[10], A, B, C, D, E); W[11] = vsetzero();   // 10
        R1(W[11], E, A, B, C, D); W[12] = vsetzero();
        R1(W[12], D, E, A, B, C); W[13] = vsetzero();
        R1(W[13], C, D, E, A, B); W[14] = vsetzero();
        R1(W[14], B, C, D, E, A);
#endif

        // Fetch the message lengths, multiply 8 (to get the length in bits).
        W[15] = vslli_epi32(vload(&N[i]), 3);

        R1(W[15], A, B, C, D, E);                       // 15

        X(W[0],  W[2],  W[8],  W[13]);  R1(W[0],  E, A, B, C, D);
        X(W[1],  W[3],  W[9],  W[14]);  R1(W[1],  D, E, A, B, C);
        X(W[2],  W[4],  W[10], W[15]);  R1(W[2],  C, D, E, A, B);
        X(W[3],  W[5],  W[11], W[0]);   R1(W[3],  B, C, D, E, A);

        K = vset1_epi32(0x6ED9EBA1);    // rounds 20-39

        X(W[4],  W[6],  W[12], W[1]);   R2(W[4],  A, B, C, D, E); // 20
        X(W[5],  W[7],  W[13], W[2]);   R2(W[5],  E, A, B, C, D);
        X(W[6],  W[8],  W[14], W[3]);   R2(W[6],  D, E, A, B, C);
        X(W[7],  W[9],  W[15], W[4]);   R2(W[7],  C, D, E, A, B);
        X(W[8],  W[10], W[0],  W[5]);   R2(W[8],  B, C, D, E, A);
        X(W[9],  W[11], W[1],  W[6]);   R2(W[9],  A, B, C, D, E); // 25
        X(W[10], W[12], W[2],  W[7]);   R2(W[10], E, A, B, C, D);
        X(W[11], W[13], W[3],  W[8]);   R2(W[11], D, E, A, B, C);
        X(W[12], W[14], W[4],  W[9]);   R2(W[12], C, D, E, A, B);
        X(W[13], W[15], W[5],  W[10]);  R2(W[13], B, C, D, E, A);
        X(W[14], W[0],  W[6],  W[11]);  R2(W[14], A, B, C, D, E); // 30
        X(W[15], W[1],  W[7],  W[12]);  R2(W[15], E, A, B, C, D);
        X(W[0],  W[2],  W[8],  W[13]);  R2(W[0],  D, E, A, B, C);
        X(W[1],  W[3],  W[9],  W[14]);  R2(W[1],  C, D, E, A, B);
        X(W[2],  W[4],  W[10], W[15]);  R2(W[2],  B, C, D, E, A);
        X(W[3],  W[5],  W[11], W[0]);   R2(W[3],  A, B, C, D, E); // 35
        X(W[4],  W[6],  W[12], W[1]);   R2(W[4],  E, A, B, C, D);
        X(W[5],  W[7],  W[13], W[2]);   R2(W[5],  D, E, A, B, C);
        X(W[6],  W[8],  W[14], W[3]);   R2(W[6],  C, D, E, A, B);
        X(W[7],  W[9],  W[15], W[4]);   R2(W[7],  B, C, D, E, A);

        K = vset1_epi32(0x8F1BBCDC);    // rounds 40-59

        X(W[8],  W[10], W[0],  W[5]);   R3(W[8],  A, B, C, D, E); // 40
        X(W[9],  W[11], W[1],  W[6]);   R3(W[9],  E, A, B, C, D);
        X(W[10], W[12], W[2],  W[7]);   R3(W[10], D, E, A, B, C);
        X(W[11], W[13], W[3],  W[8]);   R3(W[11], C, D, E, A, B);
        X(W[12], W[14], W[4],  W[9]);   R3(W[12], B, C, D, E, A);
        X(W[13], W[15], W[5],  W[10]);  R3(W[13], A, B, C, D, E); // 45
        X(W[14], W[0],  W[6],  W[11]);  R3(W[14], E, A, B, C, D);
        X(W[15], W[1],  W[7],  W[12]);  R3(W[15], D, E, A, B, C);
        X(W[0],  W[2],  W[8],  W[13]);  R3(W[0],  C, D, E, A, B);
        X(W[1],  W[3],  W[9],  W[14]);  R3(W[1],  B, C, D, E, A);
        X(W[2],  W[4],  W[10], W[15]);  R3(W[2],  A, B, C, D, E); // 50
        X(W[3],  W[5],  W[11], W[0]);   R3(W[3],  E, A, B, C, D);
        X(W[4],  W[6],  W[12], W[1]);   R3(W[4],  D, E, A, B, C);
        X(W[5],  W[7],  W[13], W[2]);   R3(W[5],  C, D, E, A, B);
        X(W[6],  W[8],  W[14], W[3]);   R3(W[6],  B, C, D, E, A);
        X(W[7],  W[9],  W[15], W[4]);   R3(W[7],  A, B, C, D, E); // 55
        X(W[8],  W[10], W[0],  W[5]);   R3(W[8],  E, A, B, C, D);
        X(W[9],  W[11], W[1],  W[6]);   R3(W[9],  D, E, A, B, C);
        X(W[10], W[12], W[2],  W[7]);   R3(W[10], C, D, E, A, B);
        X(W[11], W[13], W[3],  W[8]);   R3(W[11], B, C, D, E, A);

        K = vset1_epi32(0xCA62C1D6);    // rounds 60-79 (parity again)

        X(W[12], W[14], W[4],  W[9]);   R2(W[12], A, B, C, D, E); // 60
        X(W[13], W[15], W[5],  W[10]);  R2(W[13], E, A, B, C, D);
        X(W[14], W[0],  W[6],  W[11]);  R2(W[14], D, E, A, B, C);
        X(W[15], W[1],  W[7],  W[12]);  R2(W[15], C, D, E, A, B);
        X(W[0],  W[2],  W[8],  W[13]);  R2(W[0],  B, C, D, E, A);
        X(W[1],  W[3],  W[9],  W[14]);  R2(W[1],  A, B, C, D, E); // 65
        X(W[2],  W[4],  W[10], W[15]);  R2(W[2],  E, A, B, C, D);
        X(W[3],  W[5],  W[11], W[0]);   R2(W[3],  D, E, A, B, C);
        X(W[4],  W[6],  W[12], W[1]);   R2(W[4],  C, D, E, A, B);
        X(W[5],  W[7],  W[13], W[2]);   R2(W[5],  B, C, D, E, A);
        X(W[6],  W[8],  W[14], W[3]);   R2(W[6],  A, B, C, D, E); // 70
        X(W[7],  W[9],  W[15], W[4]);   R2(W[7],  E, A, B, C, D);
        X(W[8],  W[10], W[0],  W[5]);   R2(W[8],  D, E, A, B, C);
        X(W[9],  W[11], W[1],  W[6]);   R2(W[9],  C, D, E, A, B);
        X(W[10], W[12], W[2],  W[7]);   R2(W[10], B, C, D, E, A);
        X(W[11], W[13], W[3],  W[8]);   R4(W[11], A, B, C, D, E); // 75

        // A75 has an interesting property, it is the first word that's (almost)
        // part of the final MD (E79 ror 2). The common case will be that this
        // doesn't match, so we stop here and save 5 rounds.
        //
        // Note that I'm using E due to displacement caused by vectorization,
        // this is A in standard SHA-1.
        vstore(&MD[i], E);
    }
    return count;
}

// Scan MD[] for any lane equal to the target word in 'binary'.
// Returns nonzero if at least one candidate matched (cmp_one narrows it).
static int sha1_fmt_cmp_all(void *binary, int count)
{
    uint32_t M;   // NOTE(review): shadows the global message buffer M.
    uint32_t i;
    vtype B;

    // This function is hot, we need to do this quickly. We use PCMP to find
    // out if any of the dwords in A75 matched E in the input hash.
    // First, Load the target hash into an XMM register
    B = vloadu(binary);
    M = 0;

#ifdef _OPENMP
#pragma omp parallel for reduction(|:M)
#endif

    // We can test for matches 4/8 at a time. As the common case will be that
    // there is no match, we can avoid testing it after every compare, reducing
    // the number of branches.
    //
    // It's hard to convince GCC that it's safe to unroll this loop, so I've
    // manually unrolled it a little bit.
    // (Stride 64 works because max_keys_per_crypt is a multiple of
    // SHA1_PARALLEL_HASH = 512.)
    for (i = 0; i < count; i += 64) {
        uint32_t R = 0;
#if __AVX512F__ || __MIC__
        R |= vanyeq_epi32(B, vload(&MD[i +  0]));
        R |= vanyeq_epi32(B, vload(&MD[i + 16]));
        R |= vanyeq_epi32(B, vload(&MD[i + 32]));
        R |= vanyeq_epi32(B, vload(&MD[i + 48]));
#elif __AVX2__
        R |= vanyeq_epi32(B, vload(&MD[i +  0]));
        R |= vanyeq_epi32(B, vload(&MD[i +  8]));
        R |= vanyeq_epi32(B, vload(&MD[i + 16]));
        R |= vanyeq_epi32(B, vload(&MD[i + 24]));
        R |= vanyeq_epi32(B, vload(&MD[i + 32]));
        R |= vanyeq_epi32(B, vload(&MD[i + 40]));
        R |= vanyeq_epi32(B, vload(&MD[i + 48]));
        R |= vanyeq_epi32(B, vload(&MD[i + 56]));
#else
        R |= vanyeq_epi32(B, vload(&MD[i +  0]));
        R |= vanyeq_epi32(B, vload(&MD[i +  4]));
        R |= vanyeq_epi32(B, vload(&MD[i +  8]));
        R |= vanyeq_epi32(B, vload(&MD[i + 12]));
        R |= vanyeq_epi32(B, vload(&MD[i + 16]));
        R |= vanyeq_epi32(B, vload(&MD[i + 20]));
        R |= vanyeq_epi32(B, vload(&MD[i + 24]));
        R |= vanyeq_epi32(B, vload(&MD[i + 28]));
        R |= vanyeq_epi32(B, vload(&MD[i + 32]));
        R |= vanyeq_epi32(B, vload(&MD[i + 36]));
        R |= vanyeq_epi32(B, vload(&MD[i + 40]));
        R |= vanyeq_epi32(B, vload(&MD[i + 44]));
        R |= vanyeq_epi32(B, vload(&MD[i + 48]));
        R |= vanyeq_epi32(B, vload(&MD[i + 52]));
        R |= vanyeq_epi32(B, vload(&MD[i + 56]));
        R |= vanyeq_epi32(B, vload(&MD[i + 60]));
#endif
        M |= R;
    }

    return M;
}

// Partial-hash accessors over the stored A75 word (standard JtR get_hash /
// binary_hash pairs, one per PH_MASK level).
static inline int sha1_fmt_get_hash(int index)
{
    return MD[index];
}

static int sha1_fmt_get_hash0(int index) { return sha1_fmt_get_hash(index) & PH_MASK_0; }
static int sha1_fmt_get_hash1(int index) { return sha1_fmt_get_hash(index) & PH_MASK_1; }
static int sha1_fmt_get_hash2(int index) { return sha1_fmt_get_hash(index) & PH_MASK_2; }
static int sha1_fmt_get_hash3(int index) { return sha1_fmt_get_hash(index) & PH_MASK_3; }
static int sha1_fmt_get_hash4(int index) { return sha1_fmt_get_hash(index) & PH_MASK_4; }
static int sha1_fmt_get_hash5(int index) { return sha1_fmt_get_hash(index) & PH_MASK_5; }
static int sha1_fmt_get_hash6(int index) { return sha1_fmt_get_hash(index) & PH_MASK_6; }

static inline int sha1_fmt_get_binary(void *binary)
{
    return *(uint32_t*)(binary);
}

static int sha1_fmt_binary0(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_0; }
static int sha1_fmt_binary1(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_1; }
static int sha1_fmt_binary2(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_2; }
static int sha1_fmt_binary3(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_3; }
static int sha1_fmt_binary4(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_4; }
static int sha1_fmt_binary5(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_5; }
static int sha1_fmt_binary6(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_6; }

static int sha1_fmt_cmp_one(void *binary, int index)
{
    // We can quickly check if it will be worth doing a full comparison here,
    // this lets us turn up SHA1_PARALLEL_HASH without too much overhead when a
    // partial match occurs.
return sha1_fmt_get_binary(binary) == sha1_fmt_get_hash(index); } // This function is not hot, and will only be called for around 1:2^32 random // crypts. Use a real SHA-1 implementation to verify the result exactly. This // routine is only called by John when cmp_one succeeds. static int sha1_fmt_cmp_exact(char *source, int index) { uint32_t full_sha1_digest[SHA1_DIGEST_WORDS]; SHA_CTX ctx; char *key; // Fetch the original input to hash. key = sha1_fmt_get_key(index); SHA1_Init(&ctx); SHA1_Update(&ctx, key, strlen(key)); SHA1_Final((unsigned char*)(full_sha1_digest), &ctx); // Compare result. return !memcmp(rawsha1_common_get_binary(source), full_sha1_digest, sizeof(full_sha1_digest)); } struct fmt_main fmt_sha1_ng = { .params = { .label = "Raw-SHA1-ng", #if VWIDTH == 16 .format_name = "(pwlen <= 55)", #if __MIC__ .algorithm_name = "SHA1 512/512 MIC 16x", #else .algorithm_name = "SHA1 512/512 AVX512 16x", #endif #elif VWIDTH == 8 .format_name = "(pwlen <= 31)", .algorithm_name = "SHA1 256/256 AVX2 8x", #else .format_name = "(pwlen <= 15)", .algorithm_name = "SHA1 128/128 " #if __ALTIVEC__ "AltiVec" #elif __ARM_NEON "NEON" #elif __XOP__ "XOP" #elif __AVX__ "AVX" #elif __SSE4_1__ "SSE4.1" #else "SSE2" #endif " 4x", #endif .benchmark_comment = "", .benchmark_length = -1, #if VWIDTH * 4 - 1 > 55 .plaintext_length = 55, #else .plaintext_length = sizeof(vtype) - 1, #endif .binary_size = sizeof(vtype), .binary_align = VWIDTH * 4, .salt_size = 0, .salt_align = 1, .min_keys_per_crypt = VWIDTH, .max_keys_per_crypt = SHA1_PARALLEL_HASH, .flags = #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, .tunable_cost_name = { NULL }, .signature = { FORMAT_TAG, FORMAT_TAG_OLD }, .tests = rawsha1_common_tests, }, .methods = { .init = sha1_fmt_init, .done = done, .reset = fmt_default_reset, .prepare = rawsha1_common_prepare, .valid = rawsha1_common_valid, .split = rawsha1_common_split, .binary = sha1_fmt_binary, .salt = fmt_default_salt, 
.tunable_cost_value = { NULL }, .source = fmt_default_source, .salt_hash = fmt_default_salt_hash, .set_salt = fmt_default_set_salt, .set_key = sha1_fmt_set_key, .get_key = sha1_fmt_get_key, .clear_keys = fmt_default_clear_keys, .crypt_all = sha1_fmt_crypt_all, .get_hash = { [0] = sha1_fmt_get_hash0, [1] = sha1_fmt_get_hash1, [2] = sha1_fmt_get_hash2, [3] = sha1_fmt_get_hash3, [4] = sha1_fmt_get_hash4, [5] = sha1_fmt_get_hash5, [6] = sha1_fmt_get_hash6, }, .binary_hash = { [0] = sha1_fmt_binary0, [1] = sha1_fmt_binary1, [2] = sha1_fmt_binary2, [3] = sha1_fmt_binary3, [4] = sha1_fmt_binary4, [5] = sha1_fmt_binary5, [6] = sha1_fmt_binary6, }, .cmp_all = sha1_fmt_cmp_all, .cmp_one = sha1_fmt_cmp_one, .cmp_exact = sha1_fmt_cmp_exact }, }; #endif /* plugin stanza */ #endif /* defined(SIMD_COEF_32) && (SIMD_COEF_32 < 16 || ARCH_BITS >= 64) && !_MSC_VER */
mssql12_fmt_plug.c
/* Modified in August, 2012 by Dhiru Kholia (dhiru at openwall.com) for MS SQL 2012 * * This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. * * Modified by Mathieu Perrin (mathieu at tpfh.org) 09/06 * Microsoft MS-SQL05 password cracker * * UTF-8 support by magnum 2011, same terms as above * * Creating MS SQL 2012 hashes: * * sqlcmd -L * sqlcmd -S <server> -U sa -P <password> * 1> select pwdencrypt("openwall") * 2> go * * Dumping hashes from MS SQL server 2012: * * sqlcmd -S <server> -U sa -P <password> * 1> select * from sys.sql_logins * 2> go */ #if FMT_EXTERNS_H extern struct fmt_main fmt_mssql12; #elif FMT_REGISTERS_H john_register_one(&fmt_mssql12); #else #include <string.h> #include "arch.h" //#undef _OPENMP //#undef SIMD_COEF_32 //#undef SIMD_COEF_64 //#undef SIMD_PARA_SHA512 /* * Only effective for SIMD. * Undef to disable reversing steps for benchmarking. 
*/ #define REVERSE_STEPS #include "misc.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "unicode.h" #include "sha2.h" #include "johnswap.h" #include "simd-intrinsics.h" #include "memdbg.h" #ifdef _OPENMP #include <omp.h> #ifdef SIMD_COEF_64 #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 1024 // tuned K8-dual HT #endif #endif #endif #define FORMAT_LABEL "mssql12" #define FORMAT_NAME "MS SQL 2012/2014" #define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH ((111 - SALT_SIZE) / 2) #define CIPHERTEXT_LENGTH 54 + 44 * 2 #define BINARY_SIZE 8 #define DIGEST_SIZE 64 #define BINARY_ALIGN 8 #define SALT_SIZE 4 #define SALT_ALIGN 4 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #ifndef SHA_BUF_SIZ #define SHA_BUF_SIZ 16 #endif static struct fmt_tests tests[] = { {"0x0200F733058A07892C5CACE899768F89965F6BD1DED7955FE89E1C9A10E27849B0B213B5CE92CC9347ECCB34C3EFADAF2FD99BFFECD8D9150DD6AACB5D409A9D2652A4E0AF16", "Password1!"}, {"0x0200AB3E1F9028A739EEF62ABF672427276A32D5EDD349E638E7F2CD81DAA247CFE20EE4E3B0A30B2D0AE3C3FA010E61752F1BF45E045041F1B988C083C7F118527E3E5F0562", "openwall"}, /* hashes from https://hashcat.net/forum */ {"0x02006BF4AB05873FF0C8A4AFD1DC5912CBFDEF62E0520A3353B04E1184F05C873C9C76BBADDEAAC1E9948C7B6ABFFD62BFEFD7139F17F6AFE10BE0FEE7A178644623067C2423", "carlos"}, {"0x0200935819BA20F1C7289CFF2F8FF9F0E40DA5E6D04986F988CFE6603DA0D2BC0160776614763198967D603FBD8C103151A15E70D18E7B494C7F13F16804A7A4EB206084E632", "test"}, {"0x0200570AC969EF7C6CCB3312E8BEDE1D635EB852C06496957F0FA845B20FCD1C7C457474A5B948B68C47C2CB704D08978871F532C9EB11199BB5F56A06AC915C3799DB8A64C1", "test1"}, 
{"0x0200A56045DBCD848E297FA8D06E7579D62B7129928CA0BC5D232A7320972EF5A5455C01411B8D3A7FF3D18A55058A12FAEE5DA410AFE6CE61FF5C39E5FF57CD3EDD57DB1C3B", "test2"}, {"0x020059799F1B6D897BE2C5A76D3FFDC52B308190E82FA01F2FA51129B4863A7EE21B3FF6FE9F7850976045237805F338DD36DC9345B429F47A402614C6F2F2B02C56DF14C4F4", "Paul"}, {"0x0200881E2999DD8E3583695F405696257B99559953705A34D774C15AC1D42699BB77BC56DB5F657751335C1B350890E643790553B60329CAE7A2E7D3C04CF8856C4DB0058723", "DBAmaster"}, {"0x0200D648446E70180A6DFB6DF14DB38623EBFE490FE445751900FD5DC45A2B5D20D7AFFE8C6FFC2890BAE1AF34430A21F2F1E4DE50E25757FDB4789716D8D85C6985A00BC454", "database"}, {"0x02008AC3B9DC7B67EF9D3C1D25D8007A4B957D5BD61D71E5E9DA08D9F8F012EDDAD168E1CADD93D4627433FBFEE8BCF6CBB42D5B9A31886FC5FF7F970B164F4B5815E03D6DE7", "jhl9mqe5"}, {"0x020094C4D05A082DB1362B1A972C5D5F1C04C527090A7427E93C13AFEC705A011D8980E994FA647C7D44E25A427246218E25674571DB1710E49C713FB17129549C29E303086A", "coldfusion"}, {"0x0200B9BD5C85918D9BEE84417957618FBA1CB80B71E81550FAE09AD027B4089017CD6461D8EC9509873C2D5096CDBE8F16E4EFA9035C35F9F4917CE58DB99DC6836CEA7483A7", "sql2005"}, {NULL} }; static unsigned char cursalt[SALT_SIZE]; #ifdef SIMD_COEF_64 static uint64_t (*saved_key)[SHA_BUF_SIZ]; static uint64_t (*crypt_out); static int max_keys; static int new_keys; #else static char (*saved_key)[(PLAINTEXT_LENGTH + 1) * 2 + SALT_SIZE]; static uint64_t (*crypt_out)[DIGEST_SIZE / 8]; static int *saved_len; #endif static int valid(char *ciphertext, struct fmt_main *self) { int i; if (strncmp(ciphertext, "0x0200", 6)) return 0; if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH) return 0; for (i = 6; i < CIPHERTEXT_LENGTH; i++) { if (!((('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) || //(('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) || (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F')))) return 0; } return 1; } static void set_salt(void *salt) { memcpy(cursalt, salt, SALT_SIZE); #ifdef SIMD_COEF_64 new_keys = 1; #endif } static void 
*get_salt(char *ciphertext) { static unsigned char *out2; int l; if (!out2) out2 = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD); for (l = 0;l<SALT_SIZE;l++) { out2[l] = atoi16[ARCH_INDEX(ciphertext[l*2+6])]*16 + atoi16[ARCH_INDEX(ciphertext[l*2+7])]; } return out2; } static void set_key_enc(char *_key, int index); static void init(struct fmt_main *self) { #if defined (_OPENMP) int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_64 saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_out = mem_calloc_align(self->params.max_keys_per_crypt, 8 * sizeof(uint64_t), MEM_ALIGN_SIMD); max_keys = self->params.max_keys_per_crypt; #else saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); #endif if (options.target_enc == UTF_8) self->params.plaintext_length = MIN(125, PLAINTEXT_LENGTH * 3); if (options.target_enc != ISO_8859_1 && options.target_enc != ASCII) self->methods.set_key = set_key_enc; } static void done(void) { #ifndef SIMD_COEF_64 MEM_FREE(saved_len); #endif MEM_FREE(crypt_out); MEM_FREE(saved_key); } #ifdef SIMD_COEF_64 static void clear_keys(void) { memset(saved_key, 0, sizeof(*saved_key) * max_keys); } #endif static void set_key(char *_key, int index) { #ifndef SIMD_COEF_64 /* ASCII or ISO-8859-1 to UCS-2 */ UTF8 *s = (UTF8*)_key; UTF16 *d = (UTF16*)saved_key[index]; for (saved_len[index] = 0; s[saved_len[index]]; saved_len[index]++) #if ARCH_LITTLE_ENDIAN d[saved_len[index]] = s[saved_len[index]]; #else d[saved_len[index]] = s[saved_len[index]] << 8; #endif d[saved_len[index]] = 0; saved_len[index] <<= 1; #else uint64_t *keybuffer = saved_key[index]; unsigned short *w16 = (unsigned short*)keybuffer; UTF8 *key = (UTF8*)_key; int 
len = 0; while ((*w16++ = *key++)) len++; keybuffer[15] = ((len << 1) + SALT_SIZE) << 3; new_keys = 1; #if !ARCH_LITTLE_ENDIAN alter_endianity_w16(saved_key[index], len<<1); #endif #endif } static void set_key_enc(char *_key, int index) { #ifndef SIMD_COEF_64 /* Any encoding -> UTF-16 */ saved_len[index] = enc_to_utf16((UTF16*)saved_key[index], PLAINTEXT_LENGTH, (unsigned char*)_key, strlen(_key)); if (saved_len[index] < 0) saved_len[index] = strlen16((UTF16*)saved_key[index]); saved_len[index] <<= 1; #else uint64_t *keybuffer = saved_key[index]; UTF16 *w16 = (UTF16*)keybuffer; UTF8 *key = (UTF8*)_key; int len; len = enc_to_utf16(w16, PLAINTEXT_LENGTH, key, strlen(_key)); if (len < 0) len = strlen16(w16); keybuffer[15] = ((len << 1) + SALT_SIZE) << 3; new_keys = 1; #endif } static char *get_key(int index) { #ifndef SIMD_COEF_64 ((UTF16*)saved_key[index])[saved_len[index]>>1] = 0; return (char*)utf16_to_enc((UTF16*)saved_key[index]); #else uint64_t *keybuffer = saved_key[index]; UTF16 *w16 = (UTF16*)keybuffer; static UTF16 out[PLAINTEXT_LENGTH + 1]; unsigned int i, len; len = ((keybuffer[15] >> 3) - SALT_SIZE) >> 1; for (i = 0; i < len; i++) out[i] = w16[i]; out[i] = 0; return (char*)utf16_to_enc(out); #endif } static void *get_binary(char *ciphertext) { static uint64_t out[SHA_BUF_SIZ]; char *realcipher = (char*)out; int i; for (i = 0;i<DIGEST_SIZE;i++) realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+14])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+15])]; #ifdef SIMD_COEF_64 #if ARCH_LITTLE_ENDIAN==1 alter_endianity_to_BE64 (realcipher, DIGEST_SIZE/8); #endif #ifdef REVERSE_STEPS sha512_reverse(out); #endif #endif return (void *)realcipher; } #define BASE_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64) #ifndef REVERSE_STEPS #undef SSEi_REVERSE_STEPS #define SSEi_REVERSE_STEPS 0 #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for 
#endif #if defined(_OPENMP) || PLAINTEXT_LENGTH > 1 for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { #ifdef SIMD_COEF_64 if (new_keys) { int i; for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { uint64_t *keybuffer = saved_key[index + i]; unsigned char *wucp = (unsigned char*)keybuffer; int j, len = (keybuffer[15] >> 3) - SALT_SIZE; if (len >= 0) for (j = 0; j < SALT_SIZE; j++) wucp[len + j] = cursalt[j]; wucp[len + 4] = 0x80; } } SIMDSHA512body(&saved_key[index], &crypt_out[BASE_IDX], NULL, SSEi_REVERSE_STEPS | SSEi_FLAT_IN); #else SHA512_CTX ctx; memcpy(saved_key[index]+saved_len[index], cursalt, SALT_SIZE); SHA512_Init(&ctx ); SHA512_Update(&ctx, saved_key[index], saved_len[index]+SALT_SIZE ); SHA512_Final((unsigned char *)crypt_out[index], &ctx); #endif } #ifdef SIMD_COEF_64 new_keys = 0; #endif return count; } #define COMMON_GET_HASH_SIMD64 8 #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" #define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64) static int binary_hash_0(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_0; } static int binary_hash_1(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_1; } static int binary_hash_2(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_2; } static int binary_hash_3(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_3; } static int binary_hash_4(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_4; } static int binary_hash_5(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_5; } static int binary_hash_6(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_6; } static int cmp_all(void *binary, int count) { unsigned int index; for (index = 0; index < count; index++) #ifdef SIMD_COEF_64 if (((uint64_t*)binary)[0] == crypt_out[HASH_IDX]) return 1; #else if ( ((uint64_t*)binary)[0] == crypt_out[index][0] ) return 1; #endif return 0; } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_64 return 
(((uint64_t*)binary)[0] == crypt_out[HASH_IDX]); #else return !memcmp(binary, crypt_out[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { uint64_t *binary = get_binary(source); #if SIMD_COEF_64 char *key = get_key(index); UTF16 wkey[PLAINTEXT_LENGTH]; SHA512_CTX ctx; uint64_t crypt_out[DIGEST_SIZE / sizeof(uint64_t)]; int len; len = enc_to_utf16(wkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key)); if (len < 0) len = strlen16(wkey); len *= 2; SHA512_Init(&ctx); SHA512_Update(&ctx, wkey, len); SHA512_Update(&ctx, cursalt, SALT_SIZE); SHA512_Final((unsigned char*)crypt_out, &ctx); #if ARCH_LITTLE_ENDIAN==1 alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8); #endif #ifdef REVERSE_STEPS sha512_reverse(crypt_out); #endif return !memcmp(binary, crypt_out, DIGEST_SIZE); #else return !memcmp(binary, crypt_out[index], DIGEST_SIZE); #endif } static int salt_hash(void *salt) { // The >> 8 gave much better distribution on a huge set I analysed // although that was mssql05 return (*((uint32_t *)salt) >> 8) & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_mssql12 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP, { NULL }, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, #ifdef SIMD_COEF_64 clear_keys, #else fmt_default_clear_keys, #endif crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
critical.c
// RUN: %libomp-compile-and-run | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #include "callback.h" #include <omp.h> int main() { #pragma omp critical { print_current_address(1); print_ids(0); } print_current_address(2); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_wait_critical: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_critical: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_critical: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]] // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]] return 0; }
KPP_Global.h
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* */ /* Global Data Header File */ /* */ /* Generated by KPP-2.2.3 symbolic chemistry Kinetics PreProcessor */ /* (http://www.cs.vt.edu/~asandu/Software/KPP) */ /* KPP is distributed under GPL, the general public licence */ /* (http://www.gnu.org/copyleft/gpl.html) */ /* (C) 1995-1997, V. Damian & A. Sandu, CGRER, Univ. Iowa */ /* (C) 1997-2005, A. Sandu, Michigan Tech, Virginia Tech */ /* With important contributions from: */ /* M. Damian, Villanova University, USA */ /* R. Sander, Max-Planck Institute for Chemistry, Mainz, Germany */ /* */ /* File : KPP_Global.h */ /* Equation file : KPP.kpp */ /* Output root filename : KPP */ /* */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include "omp.h" #ifndef KPP_GLOBAL_H_INCLUDED #define KPP_GLOBAL_H_INCLUDED /* Declaration of global variables */ extern double C[NSPEC]; /* Concentration of all species */ extern double * VAR; extern double * FIX; extern double RCONST[NREACT]; /* Rate constants (global) */ extern double TIME; /* Current integration time */ extern int LOOKAT[NLOOKAT]; /* Indexes of species to look at */ extern const char * SPC_NAMES[NSPEC]; /* Names of chemical species */ extern char * SMASS[NMASS]; /* Names of atoms for mass balance */ extern const char * EQN_NAMES[NREACT]; /* Equation names */ extern char * EQN_TAGS[NREACT]; /* Equation tags */ /* INLINED global variable declarations */ extern double NOON_JRATES[NPHOTOL]; /* Noon-time photolysis rates */ extern double PHOTOL[NPHOTOL]; /* Photolysis rates */ extern double HET[NSPEC][3]; /* Heterogeneous reaction rates */ extern double SZA_CST[3]; /* Constants to compute cosSZA */ /* The following variables need to be declared THREADPRIVATE * because they get written to within an OpenMP parallel loop */ #pragma omp threadprivate( C, VAR, FIX, RCONST, NOON_JRATES, PHOTOL, HET, TIME, SZA_CST ) /* INLINED global variable declarations */ #endif /* 
KPP_GLOBAL_H_INCLUDED */
convolution_sgemm_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD void im2col_sgemm_pack8to1_int8_neon_arm82dot(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_neon_arm82dot(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h); #endif static void im2col_sgemm_pack8to1_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { #if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD if (ncnn::cpu_support_arm_asimddp()) { im2col_sgemm_pack8to1_int8_neon_arm82dot(bottom_im2col, top_blob, kernel, opt); return; } #endif // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; #if __aarch64__ #if __ARM_FEATURE_DOTPROD if (size >= 16) tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, 
opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #else // __ARM_FEATURE_DOTPROD if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #endif // __aarch64__ { #if __aarch64__ #if __ARM_FEATURE_DOTPROD int nn_size = size >> 4; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char* tmpptr = tmp.channel(i / 16); for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // split pack8to1 to pack4 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld2 {v0.4s, v1.4s}, [%0], #32 \n" "ld2 {v2.4s, v3.4s}, [%0], #32 \n" "ld2 {v4.4s, v5.4s}, [%0], #32 \n" "ld2 {v6.4s, v7.4s}, [%0] \n" "sub %0, %0, #96 \n" "st1 {v0.16b}, [%1], #16 \n" "st1 {v2.16b}, [%1], #16 \n" "st1 {v4.16b}, [%1], #16 \n" "st1 {v6.16b}, [%1], #16 \n" "st1 {v1.16b}, [%1], #16 \n" "st1 {v3.16b}, [%1], #16 \n" "st1 {v5.16b}, [%1], #16 \n" "st1 {v7.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size * 8; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for 
num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld2 {v0.4s, v1.4s}, [%0], #32 \n" "ld2 {v2.4s, v3.4s}, [%0] \n" "sub %0, %0, #32 \n" "st1 {v0.16b}, [%1], #16 \n" "st1 {v2.16b}, [%1], #16 \n" "st1 {v1.16b}, [%1], #16 \n" "st1 {v3.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size * 8; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else // __ARM_FEATURE_DOTPROD int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif // __ARM_FEATURE_DOTPROD #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char* tmpptr = tmp.channel(i / 4); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld2 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.16b, v1.16b}, [%0] \n" "st1 {v0.16b, v1.16b}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #endif // __ARM_FEATURE_DOTPROD img0 += size * 8; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp 
parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char* tmpptr = tmp.channel(i / 2); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld2 {v0.2s, v1.2s}, [%0] \n" "st1 {v0.2s, v1.2s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #endif // __ARM_FEATURE_DOTPROD #else asm volatile( "pld [%0, #128] \n" "vld1.s8 {d0-d1}, [%0 :64] \n" "vst1.s8 {d0-d1}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); #endif img0 += size * 8; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0 :64] \n" "vst1.s8 {d0}, [%1 :64]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0"); #endif img0 += size * 8; } } } } int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p + 1); int* outptr2 = top_blob.channel(p + 2); int* outptr3 = top_blob.channel(p + 3); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p / 4); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v24.16b}, [%6], #16 \n" // _w0123_l "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "ld1 {v16.16b}, [%5], #16 \n" // _val0123_l "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b 
\n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "0: \n" "ld1 {v17.16b}, [%5], #16 \n" // _val4567_l "sdot v0.4s, v24.16b, v16.4b[0] \n" "sdot v1.4s, v24.16b, v16.4b[1] \n" "sdot v2.4s, v24.16b, v16.4b[2] \n" "sdot v3.4s, v24.16b, v16.4b[3] \n" "ld1 {v18.16b}, [%5], #16 \n" // _val891011_l "sdot v4.4s, v24.16b, v17.4b[0] \n" "sdot v5.4s, v24.16b, v17.4b[1] \n" "sdot v6.4s, v24.16b, v17.4b[2] \n" "sdot v7.4s, v24.16b, v17.4b[3] \n" "ld1 {v19.16b}, [%5], #16 \n" // _val12131415_l "sdot v8.4s, v24.16b, v18.4b[0] \n" "sdot v9.4s, v24.16b, v18.4b[1] \n" "ld1 {v25.16b}, [%6], #16 \n" // _w0123_h "sdot v10.4s, v24.16b, v18.4b[2] \n" "sdot v11.4s, v24.16b, v18.4b[3] \n" "ld1 {v20.16b}, [%5], #16 \n" // _val0123_h "sdot v12.4s, v24.16b, v19.4b[0] \n" "sdot v13.4s, v24.16b, v19.4b[1] \n" "sdot v14.4s, v24.16b, v19.4b[2] \n" "sdot v15.4s, v24.16b, v19.4b[3] \n" "ld1 {v21.16b}, [%5], #16 \n" // _val4567_h "sdot v0.4s, v25.16b, v20.4b[0] \n" "sdot v1.4s, v25.16b, v20.4b[1] \n" "sdot v2.4s, v25.16b, v20.4b[2] \n" "sdot v3.4s, v25.16b, v20.4b[3] \n" "ld1 {v22.16b}, [%5], #16 \n" // _val891011_h "sdot v4.4s, v25.16b, v21.4b[0] \n" "sdot v5.4s, v25.16b, v21.4b[1] \n" "sdot v6.4s, v25.16b, v21.4b[2] \n" "sdot v7.4s, v25.16b, v21.4b[3] \n" "ld1 {v23.16b}, [%5], #16 \n" // _val12131415_h "sdot v8.4s, v25.16b, v22.4b[0] \n" "sdot v9.4s, v25.16b, v22.4b[1] \n" "ld1 {v24.16b}, [%6], #16 \n" // _w0123_l "sdot v10.4s, v25.16b, v22.4b[2] \n" "sdot v11.4s, v25.16b, v22.4b[3] \n" "ld1 {v16.16b}, [%5], #16 \n" // _val0123_l "sdot v12.4s, v25.16b, v23.4b[0] \n" "sdot v13.4s, v25.16b, v23.4b[1] \n" "subs %w4, %w4, #1 \n" "sdot v14.4s, v25.16b, v23.4b[2] \n" "sdot v15.4s, v25.16b, v23.4b[3] \n" "bne 0b \n" "sub %5, %5, #16 \n" "sub %6, %6, #16 \n" // transpose 4x16 "trn1 
v16.4s, v0.4s, v1.4s \n" "trn2 v17.4s, v0.4s, v1.4s \n" "trn1 v18.4s, v2.4s, v3.4s \n" "trn2 v19.4s, v2.4s, v3.4s \n" "trn1 v20.4s, v4.4s, v5.4s \n" "trn2 v21.4s, v4.4s, v5.4s \n" "trn1 v22.4s, v6.4s, v7.4s \n" "trn2 v23.4s, v6.4s, v7.4s \n" "trn1 v24.4s, v8.4s, v9.4s \n" "trn2 v25.4s, v8.4s, v9.4s \n" "trn1 v26.4s, v10.4s, v11.4s \n" "trn2 v27.4s, v10.4s, v11.4s \n" "trn1 v28.4s, v12.4s, v13.4s \n" "trn2 v29.4s, v12.4s, v13.4s \n" "trn1 v30.4s, v14.4s, v15.4s \n" "trn2 v31.4s, v14.4s, v15.4s \n" "trn1 v0.2d, v16.2d, v18.2d \n" "trn2 v8.2d, v16.2d, v18.2d \n" "trn1 v4.2d, v17.2d, v19.2d \n" "trn2 v12.2d, v17.2d, v19.2d \n" "trn1 v1.2d, v20.2d, v22.2d \n" "trn2 v9.2d, v20.2d, v22.2d \n" "trn1 v5.2d, v21.2d, v23.2d \n" "trn2 v13.2d, v21.2d, v23.2d \n" "trn1 v2.2d, v24.2d, v26.2d \n" "trn2 v10.2d, v24.2d, v26.2d \n" "trn1 v6.2d, v25.2d, v27.2d \n" "trn2 v14.2d, v25.2d, v27.2d \n" "trn1 v3.2d, v28.2d, v30.2d \n" "trn2 v11.2d, v28.2d, v30.2d \n" "trn1 v7.2d, v29.2d, v31.2d \n" "trn2 v15.2d, v29.2d, v31.2d \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3], #64 \n" : "=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(tmpptr), "6"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p / 4); int nn = inch * maxk; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = 
vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } // transpose 4x8 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); int32x4x2_t _s45 = vtrnq_s32(_sum4, _sum5); int32x4x2_t _s67 = vtrnq_s32(_sum6, _sum7); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); _sum4 = vcombine_s32(vget_low_s32(_s45.val[0]), vget_low_s32(_s67.val[0])); _sum5 = vcombine_s32(vget_low_s32(_s45.val[1]), 
vget_low_s32(_s67.val[1])); _sum6 = vcombine_s32(vget_high_s32(_s45.val[0]), vget_high_s32(_s67.val[0])); _sum7 = vcombine_s32(vget_high_s32(_s45.val[1]), vget_high_s32(_s67.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); vst1q_s32(outptr0 + 4, _sum4); vst1q_s32(outptr1 + 4, _sum5); vst1q_s32(outptr2 + 4, _sum6); vst1q_s32(outptr3 + 4, _sum7); outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } // transpose 4x4 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), 
vget_high_s32(_s23.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #else // __ARM_FEATURE_DOTPROD asm volatile( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%5, #128] \n" "prfm pldl1keep, [%6, #256] \n" "lsr w4, %w4, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%6, #512] \n" "add x5, %5, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%5] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%6], #64 \n" "add %5, %5, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%5] \n" "add %5, %5, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%6, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%6, #512] \n" "smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" 
"smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" "smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add %5, %5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %5, %5, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%6], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%5, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%5, #384] \n" "sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %5, %5, #64 \n" "sub %6, %6, #64 \n" "1: \n" "and w4, %w4, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, v17.8b}, [%5], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%6], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" "smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%5], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" 
"smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v4.4s \n" "addp v1.4s, v1.4s, v5.4s \n" "addp v2.4s, v2.4s, v6.4s \n" "addp v3.4s, v3.4s, v7.4s \n" "addp v8.4s, v8.4s, v12.4s \n" "addp v9.4s, v9.4s, v13.4s \n" "addp v10.4s, v10.4s, v14.4s \n" "addp v11.4s, v11.4s, v15.4s \n" "addp v0.4s, v0.4s, v8.4s \n" "addp v1.4s, v1.4s, v9.4s \n" "addp v2.4s, v2.4s, v10.4s \n" "addp v3.4s, v3.4s, v11.4s \n" "st1 {v0.4s}, [%0], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v2.4s}, [%2], #16 \n" "st1 {v3.4s}, [%3], #16 \n" : "=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(tmpptr), "6"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif // __ARM_FEATURE_DOTPROD } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = 
kernel.channel(p / 4); int nn = inch * maxk; // inch always > 0 #if __aarch64__ #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val01_l_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val01_l_h, 2); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } vst1q_lane_s32(outptr0, _sum0, 0); vst1q_lane_s32(outptr1, _sum0, 1); vst1q_lane_s32(outptr2, _sum0, 2); vst1q_lane_s32(outptr3, _sum0, 3); vst1q_lane_s32(outptr0 + 1, _sum1, 0); vst1q_lane_s32(outptr1 + 1, _sum1, 1); vst1q_lane_s32(outptr2 + 1, _sum1, 2); vst1q_lane_s32(outptr3 + 1, _sum1, 3); outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = 
vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67)); _wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t 
_s101 = vpaddq_s32(_sum10, _sum11); int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); int32x4_t _sum0 = vpaddq_s32(_s001, _s023); int32x4_t _sum1 = vpaddq_s32(_s101, _s123); vst1q_lane_s32(outptr0, _sum0, 0); vst1q_lane_s32(outptr1, _sum0, 1); vst1q_lane_s32(outptr2, _sum0, 2); vst1q_lane_s32(outptr3, _sum0, 3); vst1q_lane_s32(outptr0 + 1, _sum1, 0); vst1q_lane_s32(outptr1 + 1, _sum1, 1); vst1q_lane_s32(outptr2 + 1, _sum1, 2); vst1q_lane_s32(outptr3 + 1, _sum1, 3); outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ asm volatile( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "pld [%5, #256] \n" "lsr r4, %4, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %6, #16 \n" "pld [%6, #128] \n" "mov r6, #32 \n" "pld [%6, #384] \n" "vld1.s8 {d20-d21}, [%6 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%5 :128]! \n" // _val0 _val1 "vld1.s8 {d22-d23}, [%6 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%5, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%6, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%5 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%6 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%5 :128]! 
\n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%6 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%5, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%6, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %5, %5, #32 \n" "sub %6, %6, #64 \n" "1: \n" "and r4, %4, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%5 :128]! \n" // _val "vld1.s8 {d20-d21}, [%6 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%6 :128]! \n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "vst1.s32 {d0[0]}, [%0]! \n" "vst1.s32 {d0[1]}, [%1]! \n" "vst1.s32 {d1[0]}, [%2]! \n" "vst1.s32 {d1[1]}, [%3]! \n" "vst1.s32 {d2[0]}, [%0]! \n" "vst1.s32 {d2[1]}, [%1]! \n" "vst1.s32 {d3[0]}, [%2]! \n" "vst1.s32 {d3[1]}, [%3]! 
\n" : "=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(tmpptr), "6"(kptr0) : "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } vst1q_lane_s32(outptr0, _sum0, 0); vst1q_lane_s32(outptr1, _sum0, 1); vst1q_lane_s32(outptr2, _sum0, 2); vst1q_lane_s32(outptr3, _sum0, 3); outptr0 += 1; outptr1 += 1; outptr2 += 1; outptr3 += 1; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), 
vget_low_s8(_w45)); _wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); _sum0 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); _sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high)); #endif vst1q_lane_s32(outptr0, _sum0, 0); vst1q_lane_s32(outptr1, _sum0, 1); vst1q_lane_s32(outptr2, _sum0, 2); vst1q_lane_s32(outptr3, _sum0, 3); outptr0 += 1; outptr1 += 1; outptr2 += 1; outptr3 += 1; #endif // __ARM_FEATURE_DOTPROD } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p 
/ 4 + p % 4); int nn = inch * maxk; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val89ab_l = vld1q_s8(tmpptr + 32); int8x16_t _valcdef_l = vld1q_s8(tmpptr + 48); int8x16_t _val0123_h = vld1q_s8(tmpptr + 64); int8x16_t _val4567_h = vld1q_s8(tmpptr + 80); int8x16_t _val89ab_h = vld1q_s8(tmpptr + 96); int8x16_t _valcdef_h = vld1q_s8(tmpptr + 112); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val89ab_l, _w_lh, 0); _sum3 = vdotq_lane_s32(_sum3, _valcdef_l, _w_lh, 0); _sum0 = vdotq_lane_s32(_sum0, _val0123_h, _w_lh, 1); _sum1 = vdotq_lane_s32(_sum1, _val4567_h, _w_lh, 1); _sum2 = vdotq_lane_s32(_sum2, _val89ab_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _valcdef_h, _w_lh, 1); tmpptr += 128; kptr0 += 8; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = inch * maxk; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val0123_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _val4567_h, _w_lh, 1); 
tmpptr += 64; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); outptr0 += 8; } #endif // __ARM_FEATURE_DOTPROD for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val0123_h, _w_lh, 1); tmpptr += 32; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_s32(outptr0, _sum0); outptr0 += 4; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _val2 = vld1q_s8(tmpptr + 32); int8x16_t _val3 = vld1q_s8(tmpptr + 48); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), vget_low_s8(_w)); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val2), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val2), vget_high_s8(_w)); _s2 = vmlal_s8(_s2, vget_low_s8(_val3), vget_high_s8(_w)); _s3 = vmlal_s8(_s3, vget_high_s8(_val3), vget_high_s8(_w)); _sum0 = 
vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); tmpptr += 64; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), _w); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), _w); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); tmpptr += 32; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); _sum4 = vaddq_s32(_sum4, _sum5); _sum6 = vaddq_s32(_sum6, _sum7); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s4 = vadd_s32(vget_low_s32(_sum4), vget_high_s32(_sum4)); int32x2_t _s6 = vadd_s32(vget_low_s32(_sum6), vget_high_s32(_sum6)); int32x2_t _ss0 = vpadd_s32(_s0, _s2); int32x2_t _ss1 = vpadd_s32(_s4, _s6); int32x4_t _ss = vcombine_s32(_ss0, _ss1); vst1q_s32(outptr0, _ss); outptr0 += 4; #endif // __ARM_FEATURE_DOTPROD } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* 
tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x2_t _sum0 = vdup_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val01_lh = vld1q_s8(tmpptr); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdot_lane_s32(_sum0, vget_low_s8(_val01_lh), _w_lh, 0); _sum1 = vdot_lane_s32(_sum1, vget_high_s8(_val01_lh), _w_lh, 1); tmpptr += 16; kptr0 += 8; } int32x2_t _sum = vadd_s32(_sum0, _sum1); vst1_s32(outptr0, _sum); outptr0 += 2; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val1), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val1), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 32; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _ss = vpadd_s32(_s0, _s2); vst1_s32(outptr0, 
_ss); outptr0 += 2; #endif // __ARM_FEATURE_DOTPROD } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w = vld1q_s8(kptr0); _sum0 = vdotq_s32(_sum0, _val, _w); tmpptr += 16; kptr0 += 16; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); _sum1 = vdot_s32(_sum1, _val, _w); tmpptr += 8; kptr0 += 8; } int sum = vaddvq_s32(_sum0) + vaddv_s32(_sum1); outptr0[0] = sum; outptr0 += 1; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s8 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w)); _s8 = vmlal_s8(_s8, vget_high_s8(_val), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8)); tmpptr += 16; kptr0 += 16; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s8 = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8)); tmpptr += 8; kptr0 += 8; } int32x4_t _sum = vaddq_s32(_sum0, _sum1); #if __aarch64__ int sum = vaddvq_s32(_sum); // dot #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); int sum = vget_lane_s32(_ss, 0); #endif outptr0[0] = sum; outptr0 += 1; #endif // __ARM_FEATURE_DOTPROD } } } static void 
convolution_im2col_sgemm_transform_kernel_pack8to1_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    // Repack int8 convolution weights into the interleaved layout consumed by
    // the pack8to1 sgemm kernels above.  On runtime-dispatch builds without
    // compile-time dotprod support, defer to the asimddp-specialized variant
    // when the CPU reports that feature.
#if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD
    if (ncnn::cpu_support_arm_asimddp())
    {
        convolution_im2col_sgemm_transform_kernel_pack8to1_int8_neon_arm82dot(_kernel, kernel_tm, inch, outch, kernel_w, kernel_h);
        return;
    }
#endif
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82)
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    // 4-output-channel groups need 32 bytes per (k, 8-input) cell; the scalar
    // tail channels need only 8.  "outch / 4 + outch % 4" lays the 4-wide
    // groups first, then one channel per remaining output.
    if (outch >= 4)
        kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
    else
        kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);

    int q = 0;
    // Groups of 4 output channels.
    // NOTE(review): the input-channel loop advances in steps of 8 with no
    // remainder handling — assumes inch is a multiple of 8 (pack8 layout);
    // confirm at the caller.
    for (; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);
        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
#if __ARM_FEATURE_DOTPROD
                // dotprod layout: low 4 input channels for all 4 outputs,
                // then the high 4 input channels (4a-4b-2 ordering).
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 4; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#else
                // non-dotprod layout: all 8 input channels contiguously per
                // output channel (8a-4b ordering).
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#endif
            }
        }
    }
    // TODO unroll 2
    // Remaining output channels, one at a time: 8 input-channel bytes per k.
    for (; q < outch; q++)
    {
        signed char* g00 = kernel_tm.channel(q / 4 + q % 4);
        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 8; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
}

// int8 convolution, 8-packed input to 1-packed output: expand the input with
// im2col, then run the tiled sgemm kernel over the column buffer.
static void convolution_im2col_sgemm_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    // Column buffer: one row of 8-packed int8 per (kernel tap, input channel),
    // "size" output positions wide.
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // Bytes to skip from the end of one output row to the start of the
        // next sampling row (8 bytes per packed pixel).
        const int gap = (w * stride_h - outw * stride_w) * 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v * 8;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        // Copy 4 output positions (4 x 8 bytes) per iteration.
                        for (; j + 3 < outw; j += 4)
                        {
                            int8x8_t _val0 = vld1_s8(sptr);
                            int8x8_t _val1 = vld1_s8(sptr + stride_w * 8);
                            int8x8_t _val2 = vld1_s8(sptr + stride_w * 16);
                            int8x8_t _val3 = vld1_s8(sptr + stride_w * 24);
                            vst1_s8(ptr, _val0);
                            vst1_s8(ptr + 8, _val1);
                            vst1_s8(ptr + 16, _val2);
                            vst1_s8(ptr + 24, _val3);

                            sptr += stride_w * 32;
                            ptr += 32;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            int8x8_t _val0 = vld1_s8(sptr);
                            int8x8_t _val1 = vld1_s8(sptr + stride_w * 8);
                            vst1_s8(ptr, _val0);
                            vst1_s8(ptr + 8, _val1);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        for (; j < outw; j++)
                        {
                            int8x8_t _val = vld1_s8(sptr);
                            vst1_s8(ptr, _val);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to1_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
rose_jacobi.c
#include "rex_kmp.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* memcpy (was missing; memcpy was implicitly declared) */
#include <math.h>     /* sqrt (was missing; sqrt was implicitly declared)   */
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <immintrin.h> /* was included twice; once is enough */

#define REAL float

/* Wall-clock timestamp in milliseconds. */
static double read_timer_ms()
{
    struct timeb tm;
    ftime(&tm);
    return ((double)tm.time) * 1000.0 + ((double)tm.millitm);
}

/************************************************************
 * program to solve a finite difference
 * discretization of Helmholtz equation :
 * (d2/dx2)u + (d2/dy2)u - alpha u = f
 * using Jacobi iterative method.
 *
 * Modified: Sanjiv Shah,       Kuck and Associates, Inc. (KAI), 1998
 * Author:   Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
 *
 * This c version program is translated by
 * Chunhua Liao, University of Houston, Jan, 2005
 *
 * Input :  n - grid dimension in x direction
 *          m - grid dimension in y direction
 *          alpha - Helmholtz constant (always greater than 0.0)
 *          tol   - error tolerance for iterative solver
 *          relax - Successice over relaxation parameter
 *          mits  - Maximum iterations for iterative solver
 *
 * On output
 *       : u(n,m) - Dependent variable (solutions)
 *       : f(n,m) - Right hand side function
 *************************************************************/
#define DEFAULT_DIMSIZE 256

/* Dump an n x m row-major matrix, element by element (debug aid). */
void print_array(char *title, char *name, float *A, int n, int m)
{
    printf("%s:\n", title);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            printf("%s[%d][%d]:%f  ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data
 * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
 ******************************************************/
void initialize(int n, int m, float alpha, float *dx, float *dy, float *u_p, float *f_p)
{
    float (*u)[m] = (float (*)[m])u_p;
    float (*f)[m] = (float (*)[m])f_p;

    *dx = (2.0 / (n - 1));
    *dy = (2.0 / (m - 1));

    /* Initialize initial condition and RHS.
     * NOTE(review): xx/yy are truncated to int exactly as in the original
     * benchmark -- the grid coordinates collapse to {-1, 0}; kept for
     * bit-compatible results with the reference code. */
    for (int i = 0; i < n; i++)
        for (int j = 0; j < m; j++) {
            int xx = (int)(-1.0 + (*dx * (i - 1)));
            int yy = (int)(-1.0 + (*dy * (j - 1)));
            u[i][j] = 0.0;
            f[i][j] = (-1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy))
                       - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
        }
}

/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 ************************************************************
 * Checks error between numerical and exact solution
 ************************************************************/
void error_check(int n, int m, float alpha, float dx, float dy, float *u_p, float *f_p)
{
    float error = 0.0;
    float (*u)[m] = (float (*)[m])u_p;
    float (*f)[m] = (float (*)[m])f_p;  /* unused here but kept for symmetry */

    for (int i = 0; i < n; i++)
        for (int j = 0; j < m; j++) {
            float xx = (-1.0 + (dx * (i - 1)));
            float yy = (-1.0 + (dy * (j - 1)));
            float temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
            error = error + temp * temp;
        }
    error = (sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n", error);
}

void jacobi_seq(int n, int m, float dx, float dy, float alpha, float relax, float *u_p, float *f_p, float tol, int mits);
void jacobi_omp(int n, int m, float dx, float dy, float alpha, float relax, float *u_p, float *f_p, float tol, int mits);

int main(int argc, char *argv[])
{
    int n = 256;
    int m = 256;
    float alpha = 0.0543;
    float tol = 0.0000000001;
    float relax = 1.0;
    int mits = 5000;

    /* Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]
     * Arguments are consumed positionally; with a single argument m = n.
     * (The original if/else-if chain silently ignored ALL arguments when
     * more than 6 were given; now extra arguments are ignored but the
     * first six are still honored.) */
    if (argc > 1) { sscanf(argv[1], "%d", &n); m = n; }
    if (argc > 2) sscanf(argv[2], "%d", &m);
    if (argc > 3) sscanf(argv[3], "%g", &alpha);
    if (argc > 4) sscanf(argv[4], "%g", &tol);
    if (argc > 5) sscanf(argv[5], "%g", &relax);
    if (argc > 6) sscanf(argv[6], "%d", &mits);

    printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
    printf("------------------------------------------------------------------------------------------------------\n");

    /* Allocate the solution arrays (u: sequential, uomp: OpenMP) and RHS. */
    float *u = malloc(sizeof(float) * n * m);
    float *uomp = malloc(sizeof(float) * n * m);
    float *f = malloc(sizeof(float) * n * m);
    if (u == NULL || uomp == NULL || f == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(u);
        free(uomp);
        free(f);
        return 1;
    }

    float dx; /* grid spacing in x direction */
    float dy; /* grid spacing in y direction */
    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(float) * n * m);

    double elapsed = read_timer_ms();
    jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("seq elasped time(ms): %4f\n", elapsed);
    /* 13 flops per interior point per iteration */
    double mflops = 0.001 * mits * (n - 2) * (m - 2) * 13 / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);

    puts("================");

    elapsed = read_timer_ms();
    jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("OpenMP elasped time(ms): %4f\n", elapsed);
    mflops = 0.001 * mits * (n - 2) * (m - 2) * 13 / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);

    //print_array("Sequential Run", "u",(REAL*)u, n, m);
    error_check(n, m, alpha, dx, dy, u, f);

    free(u);
    free(f);
    free(uomp);
    return 0;
}

/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
 ******************************************************************
 * Solves the Helmholtz equation on a rectangular grid assuming:
 * (1) Uniform discretization in each direction, and
 * (2) Dirichlect boundary conditions
 * using the Jacobi iterative method.
 *
 * Input : n,m   Number of grid points in the X/Y directions
 *         dx,dy Grid spacing in the X/Y directions
 *         alpha Helmholtz eqn. coefficient
 *         omega Relaxation factor
 *         f(n,m) Right hand side function
 *         u(n,m) Dependent variable/Solution
 *         tol    Tolerance for iterative solver
 *         mits   Maximum number of iterations
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi_seq(int n, int m, float dx, float dy, float alpha, float omega, float *u_p, float *f_p, float tol, int mits)
{
    /* NOTE(review): uold is a VLA on the stack (n*m floats, 256 KiB at the
     * default 256x256); kept as in the original, but large grids may need a
     * heap buffer as in jacobi_omp. */
    float uold[n][m];
    float (*u)[m] = (float (*)[m])u_p;
    float (*f)[m] = (float (*)[m])f_p;

    float ax = (1.0 / (dx * dx));   /* X-direction coef */
    float ay = (1.0 / (dy * dy));   /* Y-direction coef */
    float b = (-2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha); /* central coef */
    float error = (10.0 * tol);
    int k = 1;

    while (k <= mits && error > tol) {
        error = 0.0;

        /* Copy new solution into old */
        for (int i = 0; i < n; i++)
            for (int j = 0; j < m; j++)
                uold[i][j] = u[i][j];

        /* Jacobi sweep over interior points */
        for (int i = 1; i < n - 1; i++)
            for (int j = 1; j < m - 1; j++) {
                float resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                               + ay * (uold[i][j - 1] + uold[i][j + 1])
                               + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }

        error = (sqrt(error) / (n * m));
        k = k + 1;
    } /* End iteration loop */

    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
}

/*
 * OpenMP variant of jacobi_seq.
 *
 * BUG FIX(review): the ROSE-generated AVX-512 body this replaces was
 * incorrect: it broadcast `resid` into a vector before `resid` was ever
 * assigned (reading an uninitialized variable -- undefined behavior),
 * computed the residual as b / (f - ...) instead of (... - f) / b, and the
 * per-iteration `error` was derived from a bogus accumulation, so the solver
 * did not converge to the same answer as jacobi_seq.  The kernel is
 * re-written as the straightforward OpenMP-parallel Jacobi sweep, which is
 * numerically equivalent to jacobi_seq (up to float reduction ordering).
 */
void jacobi_omp(int n, int m, float dx, float dy, float alpha, float omega, float *u_p, float *f_p, float tol, int mits)
{
    float *tmp = malloc(sizeof(float) * n * m);
    if (tmp == NULL) {
        fprintf(stderr, "allocation failed in jacobi_omp\n");
        return;
    }
    float (*uold)[m] = (float (*)[m])tmp;
    float (*u)[m] = (float (*)[m])u_p;
    float (*f)[m] = (float (*)[m])f_p;

    float ax = (1.0 / (dx * dx));   /* X-direction coef */
    float ay = (1.0 / (dy * dy));   /* Y-direction coef */
    float b = (-2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha); /* central coef */
    float error = (10.0 * tol);
    int k = 1;

    while (k <= mits && error > tol) {
        error = 0.0;

        /* Copy new solution into old */
        #pragma omp parallel for
        for (int i = 0; i < n; i++)
            for (int j = 0; j < m; j++)
                uold[i][j] = u[i][j];

        /* Jacobi sweep; error accumulated with an OpenMP reduction */
        #pragma omp parallel for reduction(+ : error)
        for (int i = 1; i < n - 1; i++)
            for (int j = 1; j < m - 1; j++) {
                float resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                               + ay * (uold[i][j - 1] + uold[i][j + 1])
                               + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }

        error = (sqrt(error) / (n * m));
        k = k + 1;
    } /* End iteration loop */

    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}
GB_unaryop__ainv_uint32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Only commentary has been added below; no code token is changed.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint32_int64
// op(A') function:  GB_tran__ainv_uint32_int64

// C type:   uint32_t
// A type:   int64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = -aij

// input (A) scalar type
#define GB_ATYPE \
    int64_t

// output (C) scalar type
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse (negation)
#define GB_OP(z, x) \
    z = -x ;

// casting (int64_t value narrowed to uint32_t)
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint32_int64
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = -(uint32_t) Ax [p], via the macros above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose loop body is the shared template, specialized by the
    // macros defined above.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_int32_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// Only commentary has been added below; no code token is changed.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int32_fp64)
// op(A') function:  GB (_unop_tran__identity_int32_fp64)

// C type:   int32_t
// A type:   double
// cast:     int32_t cij = GB_cast_to_int32_t ((double) (aij))
// unaryop:  cij = aij

// input (A) scalar type
#define GB_ATYPE \
    double

// output (C) scalar type
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity (the only transformation is the typecast)
#define GB_OP(z, x) \
    z = x ;

// casting: double -> int32_t via GB_cast_to_int32_t (conversion rules are
// defined by that helper, elsewhere in the library)
#define GB_CAST(z, aij) \
    int32_t z = GB_cast_to_int32_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = GB_cast_to_int32_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int32_fp64)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // OpenMP thread count for the parallel loops
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int32_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose loop body is the shared template, specialized by the
    // macros defined above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
odd-even-merge-sort_omp_alpha.c
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<omp.h>

/* NOTE: these macros evaluate their arguments more than once; only pass
 * side-effect-free expressions. */
#define MAX(a,b) ((a<b)?b:a)
#define MIN(a,b) ((a>=b)?b:a)
/* Element i of the odd / even interleaved subsequence starting at index n. */
#define ODD(A,n,i) A[n+2*i]
#define EVEN(A,n,i) A[n+2*i+1]

/* Print indices l..r-1 of A with their values (debug aid). */
void print_array(int *A, int l, int r)
{
    printf("\n");
    for (int i = l; i < r; i++)
        printf("%3d ", i);
    printf("\n");
    for (int i = l; i < r; i++)
        printf("%3d ", A[i]);
    printf("\n");
}

/* Allocate and fill an array with n pseudo-random ints in [0, n).
 * Caller owns the returned buffer. */
int *generate(int n)
{
    int *A = malloc(sizeof(int) * n);
    srand(time(NULL));
    for (int i = 0; i < n; i++)
        A[i] = rand() % n;
    return A;
}

/* Return a freshly allocated copy of A[0..n). Caller owns the buffer. */
int *copy(int *A, int n)
{
    int *C = malloc(sizeof(int) * n);
    for (int i = 0; i < n; i++)
        C[i] = A[i];
    return C;
}

/*
 * qsort comparator for ints, ascending.
 * BUG FIX: the original returned only 0 or 1 ("a < b ? 0 : 1"), which
 * violates the qsort contract (must return negative / zero / positive),
 * so the reference array could come out mis-sorted and validate() would
 * report a spurious failure.
 */
int compare(const void *a, const void *b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);   /* avoids overflow of x - y */
}

/* Compare two n-element arrays element-wise and report the result. */
void validate(int *A1, int *A2, int n)
{
    for (int i = 0; i < n; i++)
        if (A1[i] != A2[i]) {
            printf("failed\n");   /* FIX: was the typo "failse" */
            return;
        }
    printf("Success!\n");
}

void odd_even_merge_sort(int *A, int l, int c, int r);
void odd_even_merge(int *A, int l, int c, int r);
void odd_even_merge_2(int *A, int s);

/* Number of bits needed to represent n (currently unused helper). */
int log_2(int n)
{
    int i = 0;
    while (n != 0) {
        n = n >> 1;
        i++;
    }
    return i;
}

int main(int argc, char *argv[])
{
    /* N must be a power of two for the odd-even merge network. */
    char *n = "32";
    int N = atoi(argc == 2 ? argv[1] : n);

    int *unsort = generate(N);
    int *unqsort = copy(unsort, N);

    double odd_even_t = omp_get_wtime();
    odd_even_merge_sort(unsort, 0, N / 2, N);
    odd_even_t = omp_get_wtime() - odd_even_t;
    // print_array(unsort,0,N);

    double qsort_t = omp_get_wtime();
    qsort(unqsort, N, sizeof(int), &compare);
    qsort_t = omp_get_wtime() - qsort_t;

    validate(unsort, unqsort, N);
    printf("%lf (%lf times speedup)\n", odd_even_t, (qsort_t / odd_even_t));

    /* FIX: both arrays were leaked */
    free(unsort);
    free(unqsort);
    return 0;
}

/*
 * Merge two sorted runs A[l..c) and A[c..r) (each of length n = c - l)
 * with Batcher's odd-even merge: the odd-indexed and even-indexed
 * interleaved subsequences of the two runs are merged independently
 * (one OpenMP section each), then a final compare-exchange pass between
 * neighbours of the two merged halves produces the sorted result.
 */
void odd_even_merge(int *A, int l, int c, int r)
{
    int n = c - l;
    int N = n / 2;   /* length of each odd/even subsequence per run */

    int *D = malloc(sizeof(int) * n);
    int *E = malloc(sizeof(int) * n);

    #pragma omp parallel sections shared(A,D,E) firstprivate(N,l,c,r)
    {
        #pragma omp section
        {
            /* merge the odd subsequences of both runs into D */
            int t0 = 0, t1 = 0;
            for (int i = 0; i < n; i++) {
                if (t0 == N || (t1 != N && ODD(A, l, t0) > ODD(A, c, t1)))
                    D[i] = ODD(A, c, t1++);
                else
                    D[i] = ODD(A, l, t0++);
            }
        }
        #pragma omp section
        {
            /* merge the even subsequences of both runs into E */
            int t2 = 0, t3 = 0;
            for (int i = 0; i < n; i++) {
                if (t2 == N || (t3 != N && EVEN(A, l, t2) > EVEN(A, c, t3)))
                    E[i] = EVEN(A, c, t3++);
                else
                    E[i] = EVEN(A, l, t2++);
            }
        }
    }
    //printf("D:");print_array(D,0,n);
    //printf("E:");print_array(E,0,n);

    /* final compare-exchange between neighbouring elements of D and E */
    A[l] = D[0];
    for (int i = 1; i < n; i++) {
        A[l + 2 * i - 1] = MIN(D[i], E[i - 1]);
        A[l + 2 * i] = MAX(D[i], E[i - 1]);
    }
    A[r - 1] = E[n - 1];

    /* FIX: D and E were leaked on every call */
    free(D);
    free(E);
}

/* Sorting network for the 4 elements A[s..s+3] (min/max comparators). */
void odd_even_merge_2(int *A, int s)
{
    int TMP[4] = {A[s + 0], A[s + 1], A[s + 2], A[s + 3]};
    A[s + 0] = MIN(MIN(TMP[0], TMP[1]), MIN(TMP[2], TMP[3]));
    A[s + 1] = MIN(MAX(MIN(TMP[0], TMP[1]), MIN(TMP[2], TMP[3])), MIN(MAX(TMP[0], TMP[1]), MAX(TMP[2], TMP[3])));
    A[s + 2] = MAX(MAX(MIN(TMP[0], TMP[1]), MIN(TMP[2], TMP[3])), MIN(MAX(TMP[0], TMP[1]), MAX(TMP[2], TMP[3])));
    A[s + 3] = MAX(MAX(TMP[0], TMP[1]), MAX(TMP[2], TMP[3]));
}

/*
 * Recursively sort A[l..r), where r - l is a power of two and c is the
 * midpoint (l + (r - l) / 2).
 * FIX: added base cases for r - l == 2 and r - l == 4; the original only
 * stopped at c - l == 4 (i.e. r - l == 8) and recursed forever for N < 8.
 */
void odd_even_merge_sort(int *A, int l, int c, int r)
{
    //printf("odd_even_merge_sort(%d,%d,%d)\n",l,c,r);
    if (r - l == 2) {
        /* single compare-exchange */
        if (A[l] > A[c]) {
            int t = A[l];
            A[l] = A[c];
            A[c] = t;
        }
        return;
    }
    if (r - l == 4) {
        odd_even_merge_2(A, l);
        return;
    }
    if (c - l == 4) {
        /* sort each 4-element half with the network, then merge */
        odd_even_merge_2(A, l);
        odd_even_merge_2(A, c);
        odd_even_merge(A, l, c, r);
        return;
    }
    odd_even_merge_sort(A, l, (l + c) / 2, c);
    odd_even_merge_sort(A, c, (c + r) / 2, r);
    odd_even_merge(A, l, c, r);
}
equation_groupnorm.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Evangelos Georganas (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #include <omp.h> #define ALIGNDOWN(N, A) ((N) & ~((A)-1)) #define USE_VECTORIZED_PATH 1 float upconvert_bf16(libxsmm_bfloat16 x) { union libxsmm_bfloat16_hp bf16_hp; bf16_hp.i[1] = x; bf16_hp.i[0] = 0; return bf16_hp.f; } void tpp_groupnorm_fwd_fp32(long NP, long CP, long NB, long HW, long CB, long G, long num_HW_blocks, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, float eps, libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary reduce_rows_kernel, libxsmm_meltwfunction_unary reduce_groups_kernel, libxsmm_meltwfunction_unary all_zero_G_kernel) { LIBXSMM_VLA_DECL(5, float, inp, pinp, CP, NB, HW, CB); /* [NP, CP, NB, HW, CB] */ LIBXSMM_VLA_DECL(5, float, out, pout, CP, NB, HW, CB); LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB); LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB); int np, group_size; group_size = (CP*CB)/G; #pragma omp parallel for for(np = 0; np < NP; np++){ LIBXSMM_ALIGNED(float tmp[2*CB], 64); LIBXSMM_ALIGNED(float sum_X[G], 64); LIBXSMM_ALIGNED(float sum_X2[G], 64); LIBXSMM_ALIGNED(float s[CP*CB], 64); LIBXSMM_ALIGNED(float b[CP*CB], 64); int i, j, cp, cb, nb, hwb, g; float m, v; libxsmm_matrix_eqn_param eqn_param; libxsmm_meltw_unary_param m_reduce_rows_params, m_reduce_groups_params, v_reduce_rows_params, v_reduce_groups_params, 
reduce_HW_params; libxsmm_meltw_unary_param all_zero_param; libxsmm_matrix_arg arg_array[5]; for (nb = 0; nb < NB; nb++) { /* [CP, nb, HW, CB] */ all_zero_param.out.primary = sum_X; all_zero_G_kernel(&all_zero_param); all_zero_param.out.primary = sum_X2; all_zero_G_kernel(&all_zero_param); LIBXSMM_ALIGNED(float new_tmp[2*CB], 64); for (cp = 0; cp < CP; cp++){ /* [cp, nb, HW, CB] */ #pragma omp simd for (int cb = 0; cb < 2*CB; cb++) { tmp[cb] = 0.0f; } reduce_HW_params.out.primary = new_tmp; /* [2*CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW, CB] -----> [2 * CB] */ reduce_HW_kernel(&reduce_HW_params); #pragma omp simd for (cb = 0; cb < 2*CB; cb++) { tmp[cb] += new_tmp[cb]; } } if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */ g = (cp*CB)/group_size; /* determine current group */ m_reduce_rows_params.in.primary = tmp; m_reduce_rows_params.out.primary = &m; v_reduce_rows_params.in.primary = &tmp[CB]; v_reduce_rows_params.out.primary = &v; reduce_rows_kernel(&m_reduce_rows_params); reduce_rows_kernel(&v_reduce_rows_params); sum_X[g] += m; sum_X2[g] += v; } else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */ for(i=0; i < CB; i += group_size){ m_reduce_groups_params.in.primary = &tmp[i]; m_reduce_groups_params.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)]; v_reduce_groups_params.in.primary = &tmp[CB + i]; v_reduce_groups_params.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)]; reduce_groups_kernel(&m_reduce_groups_params); reduce_groups_kernel(&v_reduce_groups_params); } } } for(g = 0; g < G; g++){ /* mean and variance calculation */ mean[np*NB*G + nb*G + g] = sum_X[g] / ((float)group_size * HW); var[np*NB*G + nb*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*NB*G + nb*G + g]*mean[np*NB*G + nb*G + g]); /* var = E[X^2] - (E[X])^2 */ 
for(j = 0; j < group_size; j++){ s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps)); /* 1/sqrt(var(X) + eps) */ b[g*group_size + j] = -1 * mean[np*NB*G + nb*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */ } } for (cp = 0; cp < CP; cp++){ arg_array[1].primary = &s[cp*CB]; /* [CB] */ arg_array[2].primary = &b[cp*CB]; /* [CB] */ arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */ arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW, CB] */ eqn_param.inputs = arg_array; eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(5, out, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW,CB] */ func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */ } } } } } void tpp_groupnorm_fwd_bf16(long NP, long CP, long NB, long HW, long CB, long G, long num_HW_blocks, libxsmm_bfloat16 *pinp, libxsmm_bfloat16 *pgamma, libxsmm_bfloat16 *pbeta, float *mean, float *var, libxsmm_bfloat16 *pout, float eps, libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary reduce_rows_kernel, libxsmm_meltwfunction_unary reduce_groups_kernel, libxsmm_meltwfunction_unary all_zero_G_kernel) { LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, inp, pinp, CP, NB, HW, CB); /* [NP, CP, NB, HW, CB] */ LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, out, pout, CP, NB, HW, CB); LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB); LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, beta, pbeta, CB); int np, group_size; group_size = (CP*CB)/G; #pragma omp parallel for for(np = 0; np < NP; np++){ LIBXSMM_ALIGNED(float tmp[2*CB], 64); LIBXSMM_ALIGNED(float sum_X[G], 64); LIBXSMM_ALIGNED(float sum_X2[G], 64); LIBXSMM_ALIGNED(float s[CP*CB], 64); LIBXSMM_ALIGNED(float b[CP*CB], 64); int i, j, nb, cp, cb, g, hwb; float m, v; 
libxsmm_matrix_eqn_param eqn_param; libxsmm_meltw_unary_param m_reduce_rows_params, m_reduce_groups_params, v_reduce_rows_params, v_reduce_groups_params, reduce_HW_params; libxsmm_meltw_unary_param all_zero_param; libxsmm_matrix_arg arg_array[5]; for (nb = 0; nb < NB; nb++) { /* [CP, nb, HW, CB] */ all_zero_param.out.primary = sum_X; all_zero_G_kernel(&all_zero_param); all_zero_param.out.primary = sum_X2; all_zero_G_kernel(&all_zero_param); LIBXSMM_ALIGNED(float new_tmp[2*CB], 64); for (cp = 0; cp < CP; cp++){ /* [cp, nb, HW, CB] */ #pragma omp simd for (cb = 0; cb < 2*CB; cb++) { tmp[cb] = 0.0f; } reduce_HW_params.out.primary = new_tmp; /* [2*CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW, CB] -----> [2 * CB] */ reduce_HW_kernel(&reduce_HW_params); #pragma omp simd for (cb = 0; cb < 2*CB; cb++) { tmp[cb] += new_tmp[cb]; } } if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */ g = (cp*CB)/group_size; /* determine current group */ m_reduce_rows_params.in.primary = tmp; m_reduce_rows_params.out.primary = &m; v_reduce_rows_params.in.primary = &tmp[CB]; v_reduce_rows_params.out.primary = &v; reduce_rows_kernel(&m_reduce_rows_params); reduce_rows_kernel(&v_reduce_rows_params); sum_X[g] += m; sum_X2[g] += v; } else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */ for(i=0; i < CB; i += group_size){ m_reduce_groups_params.in.primary = &tmp[i]; m_reduce_groups_params.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)]; v_reduce_groups_params.in.primary = &tmp[CB + i]; v_reduce_groups_params.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)]; reduce_groups_kernel(&m_reduce_groups_params); reduce_groups_kernel(&v_reduce_groups_params); } } } for(g = 0; g < G; g++){ /* mean and variance calculation */ mean[np*NB*G + nb*G + g] = sum_X[g] / ((float)group_size * HW); 
/* --- tail of tpp_groupnorm_fwd_fp32 (the function header and the statistics
 *     accumulation are in the chunk above): finish per-group mean/variance,
 *     then apply the JITed normalization equation tile by tile. --- */
var[np*NB*G + nb*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*NB*G + nb*G + g]*mean[np*NB*G + nb*G + g]); /* var = E[X^2] - (E[X])^2 */
for(j = 0; j < group_size; j++){
  /* Broadcast the per-group scale/shift to every channel of the group. */
  s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps)); /* 1/sqrt(var(X) + eps) */
  b[g*group_size + j] = -1 * mean[np*NB*G + nb*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */
}
}
/* Scale pass: one JIT call per [HW/num_HW_blocks, CB] tile evaluates
 * y = ((s*x + b)*gamma + beta) (see the func10 equation built in main). */
for (cp = 0; cp < CP; cp++){
  arg_array[1].primary = &s[cp*CB]; /* [CB] */
  arg_array[2].primary = &b[cp*CB]; /* [CB] */
  arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
  arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */
  for(hwb=0; hwb < num_HW_blocks; hwb++){
    arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW, CB] */
    eqn_param.inputs = arg_array;
    eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(5, out, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW,CB] */
    func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
  }
}
}
}
}

/*
 * Backward group normalization, FP32, driven by JITed TPP equation kernels.
 * Layouts (from the VLA declarations below): activations/gradients are
 * [NP][CP][NB][HW][CB]; gamma/dgamma/dbeta are [CP][CB]; mean/var are indexed
 * as [np*NB*G + nb*G + g].  Per-thread partial dgamma/dbeta are accumulated
 * into dgamma_NP/dbeta_NP ([NP][CP][CB]) and reduced over np at the end.
 * The dgamma/dbeta/ds/db/din recurrences are evaluated by the caller-supplied
 * equation functions (built in main as my_eqn11..my_eqn15).
 */
void tpp_groupnorm_bwd_fp32(long NP, long CP, long NB, long HW, long CB, long G, long num_HW_blocks, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta, libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func, libxsmm_matrix_eqn_function db_func, libxsmm_matrix_eqn_function ds_func, libxsmm_matrix_eqn_function din_func, float eps) {
  int group_size;
  group_size = (CP*CB)/G; /* channels per group; assumes G divides CP*CB */
  const float scale = 1.0f / ((float)CP*HW*CB);
  LIBXSMM_VLA_DECL(5, float, din, pdin, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(5, float, inp, pinp, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(5, float, dout, pdout, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
  LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64); /* per-np partials, reduced below */
  LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
#pragma omp parallel
  {
    /* Scratch, private to each thread. */
    LIBXSMM_ALIGNED(float a[CP*CB], 64);
    LIBXSMM_ALIGNED(float b[CP*CB], 64);
    LIBXSMM_ALIGNED(float c[CP*CB], 64);
    LIBXSMM_ALIGNED(float ds[CP*CB], 64);
    LIBXSMM_ALIGNED(float db[CP*CB], 64);
    int np; /* declared inside the parallel region -> private per thread */
#pragma omp for
    for (np = 0; np < NP; np++) {
      int j, nb, g, cp, hwb;
      for(j = 0; j < CP*CB; j++){
        dgamma_NP[np*CP*CB + j] = 0.0f;
        dbeta_NP[np*CP*CB + j] = 0.0f;
      }
      libxsmm_matrix_eqn_param eqn_param;
      libxsmm_matrix_arg arg_array[10];
      eqn_param.inputs = arg_array;
      for (nb = 0; nb < NB; nb++) {
        for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
          for(j = 0; j < group_size; j++){
            a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps));
            b[g*group_size + j] = -a[g*group_size + j]*mean[np*NB*G + nb*G + g];
            ds[g*group_size + j] = 0.0f;
            db[g*group_size + j] = 0.0f;
          }
        }
        /* First sweep: accumulate ds, db, dgamma, dbeta per [HW-tile, CB]. */
        for (cp = 0; cp < CP; cp++) {
          arg_array[1].primary = &a[cp*CB];
          arg_array[2].primary = &b[cp*CB];
          arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
          arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
          /* arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, CB); */
          /* arg_array[5].primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, CB); */
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          /* arg_array[7].primary = &c[cp*CB]; */
          arg_array[8].primary = &ds[cp*CB];
          arg_array[9].primary = &db[cp*CB];
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            eqn_param.output.primary = &ds[cp*CB];
            ds_func(&eqn_param);
            eqn_param.output.primary = &db[cp*CB];
            db_func(&eqn_param);
            /* eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, CB); */
            eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
            dgamma_func(&eqn_param);
            /* eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, CB); */
            eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
            dbeta_func(&eqn_param);
          }
        }
        /* b = (db * mean[nb] - ds) * a * a * a * scale; */
        /* c = -b * mean[nb] - db * a * scale; */
        for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
          float gds = 0.0f;
          float gdb = 0.0f;
          for(j = 0; j < group_size; j++){
            gds += ds[g*group_size + j]; /* Group ds and db calculation */
            gdb += db[g*group_size + j];
          }
          for(j = 0; j < group_size; j++){
            b[g*group_size + j] = (gdb * mean[np*NB*G + nb*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
            c[g*group_size + j] = -b[g*group_size + j] * mean[np*NB*G + nb*G + g] - gdb * a[g*group_size + j] * scale;
          }
        }
        /* Second sweep: din = dout * a * gamma + b * inp + c, via din_func. */
        for (cp = 0; cp < CP; cp++) {
          arg_array[1].primary = &a[cp*CB];
          arg_array[2].primary = &b[cp*CB];
          /* arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, CB); */
          /* arg_array[5].primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, CB); */
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[7].primary = &c[cp*CB];
          /* arg_array[8].primary = &ds[cp*CB]; */
          /* arg_array[9].primary = &db[cp*CB]; */
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(5, din, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            din_func(&eqn_param);
          }
        }
      }
    }
    /* Final reduction of the per-np partials into dgamma/dbeta, parallelized
     * over cp.  Note: np here is the region-private variable declared above. */
    int cp;
#pragma omp for
    for (cp = 0; cp < CP; cp++) {
      for (np=0; np < NP; np++ ) {
        int cb;
        for(cb = 0; cb < CB; cb++){
          LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
          LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
        }
      }
    }
  }
}

/* Backward group normalization, bf16 activations/gradients (FP32 statistics
 * and dgamma/dbeta).  Signature and body continue below this chunk. */
void tpp_groupnorm_bwd_bf16(long NP, long CP, long NB, long HW, long CB, long G, long num_HW_blocks, libxsmm_bfloat16 *pdout, libxsmm_bfloat16 *pinp, float *mean, float *var, libxsmm_bfloat16 *pgamma,
libxsmm_bfloat16 *pdin, float *pdgamma, float *pdbeta, libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func, libxsmm_matrix_eqn_function db_func, libxsmm_matrix_eqn_function ds_func, libxsmm_matrix_eqn_function din_func, float eps) {
  /* bf16 twin of tpp_groupnorm_bwd_fp32: identical control flow, with bf16
   * VLA views for din/inp/dout/gamma; all scratch and reductions stay FP32. */
  int group_size;
  group_size = (CP*CB)/G; /* channels per group; assumes G divides CP*CB */
  const float scale = 1.0f / ((float)CP*HW*CB);
  LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, din, pdin, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, inp, pinp, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dout, pdout, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
  LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64); /* per-np partials, reduced below */
  LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
#pragma omp parallel
  {
    /* Thread-private scratch. */
    LIBXSMM_ALIGNED(float a[CP*CB], 64);
    LIBXSMM_ALIGNED(float b[CP*CB], 64);
    LIBXSMM_ALIGNED(float c[CP*CB], 64);
    LIBXSMM_ALIGNED(float ds[CP*CB], 64);
    LIBXSMM_ALIGNED(float db[CP*CB], 64);
    int np; /* region-local -> private per thread */
#pragma omp for
    for (np = 0; np < NP; np++) {
      int j, nb, g, cp, hwb;
      for(j = 0; j < CP*CB; j++){
        dgamma_NP[np*CP*CB + j] = 0.0f;
        dbeta_NP[np*CP*CB + j] = 0.0f;
      }
      libxsmm_matrix_eqn_param eqn_param;
      libxsmm_matrix_arg arg_array[10];
      eqn_param.inputs = arg_array;
      for (nb = 0; nb < NB; nb++) {
        for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
          for(j = 0; j < group_size; j++){
            a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps));
            b[g*group_size + j] = -a[g*group_size + j]*mean[np*NB*G + nb*G + g];
            ds[g*group_size + j] = 0.0f;
            db[g*group_size + j] = 0.0f;
          }
        }
        /* First sweep: accumulate ds/db/dgamma/dbeta via JITed equations. */
        for (cp = 0; cp < CP; cp++) {
          arg_array[1].primary = &a[cp*CB];
          arg_array[2].primary = &b[cp*CB];
          arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
          arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[8].primary = &ds[cp*CB];
          arg_array[9].primary = &db[cp*CB];
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            eqn_param.output.primary = &ds[cp*CB];
            ds_func(&eqn_param);
            eqn_param.output.primary = &db[cp*CB];
            db_func(&eqn_param);
            eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
            dgamma_func(&eqn_param);
            eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
            dbeta_func(&eqn_param);
          }
        }
        /* b = (db * mean[nb] - ds) * a * a * a * scale; */
        /* c = -b * mean[nb] - db * a * scale; */
        for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
          float gds = 0.0f;
          float gdb = 0.0f;
          for(j = 0; j < group_size; j++){
            gds += ds[g*group_size + j]; /* Group ds and db calculation */
            gdb += db[g*group_size + j];
          }
          for(j = 0; j < group_size; j++){
            b[g*group_size + j] = (gdb * mean[np*NB*G + nb*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
            c[g*group_size + j] = -b[g*group_size + j] * mean[np*NB*G + nb*G + g] - gdb * a[g*group_size + j] * scale;
          }
        }
        /* Second sweep: produce din via din_func. */
        for (cp = 0; cp < CP; cp++) {
          arg_array[1].primary = &a[cp*CB];
          arg_array[2].primary = &b[cp*CB];
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[7].primary = &c[cp*CB];
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(5, din, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
            din_func(&eqn_param);
          }
        }
      }
    }
    /* Final reduction of per-np partials into dgamma/dbeta (np is private). */
    int cp;
#pragma omp for
    for (cp = 0; cp < CP; cp++) {
      for (np=0; np < NP; np++ ) {
        int cb;
        for(cb = 0; cb < CB; cb++){
          LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
          LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) +=
dbeta_NP[np*CP*CB + cp*CB + cb];
        }
      }
    }
  } /* end of the parallel region */
} /* end of tpp_groupnorm_bwd_bf16 */

/*
 * Scalar (non-TPP) reference implementation of forward group normalization,
 * used in main to validate the TPP kernels.  Layouts: inp/out are
 * [NP][CP][NB][HW][CB], gamma/beta are [CP][CB], mean/var are written at
 * [np*NB*G + nb*G + g].  Parallelized over np only.
 */
void scaler_groupnorm_fwd_fp32(long NP, long CP, long NB, long HW, long CB, long G, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, float eps){
  LIBXSMM_VLA_DECL(5, float, inp, pinp, CP, NB, HW, CB); /* [NP, CP, NB, HW, CB] */
  LIBXSMM_VLA_DECL(5, float, out, pout, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB);
  int np, group_size;
  group_size = (CP*CB)/G; /* channels per group; assumes G divides CP*CB */
#pragma omp parallel for
  for(np = 0; np < NP; np++){
    LIBXSMM_ALIGNED(float sum_X[G], 64);
    LIBXSMM_ALIGNED(float sum_X2[G], 64);
    LIBXSMM_ALIGNED(float s[CP*CB], 64);
    LIBXSMM_ALIGNED(float b[CP*CB], 64);
    int i, j, cp, cb, hw, nb, g;
    float m, v, value;
    for(nb = 0; nb < NB; nb++){
      for(g = 0; g < G; g++){
        sum_X[g] = 0.0f;
        sum_X2[g] = 0.0f;
      }
      /* Accumulate per-group sums of X and X^2; two cases depending on
       * whether a channel block CB is contained in one group or split. */
      for(cp = 0; cp < CP; cp++){ /* Size = CP*HW*CB*4 */
        m = 0.0f;
        v = 0.0f;
        if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */
          for(cb = 0; cb < CB; cb++){
            for(hw = 0; hw < HW; hw++){
              value = LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB);
              m += value;
              v += (value*value);
            }
          }
          g = (cp*CB)/group_size; /* determine current group */
          sum_X[g] += m;
          sum_X2[g] += v;
        } else{
          for(i=0; i < CB; i += group_size){ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
            for(j = 0; j < group_size; j++){
              for(hw = 0; hw < HW; hw++){
                value = LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, (i + j), CP, NB, HW, CB);
                sum_X[cp*(CB/group_size) + (i/group_size)] += value;
                sum_X2[cp*(CB/group_size) + (i/group_size)] += (value*value);
              }
            }
          }
        }
      }
      for(g = 0; g < G; g++){ /* mean and variance calculation */ /* Size = 2*CP*CB*4 */
        mean[np*NB*G + nb*G + g] = sum_X[g] / ((float)group_size * HW);
        var[np*NB*G + nb*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*NB*G + nb*G + g]*mean[np*NB*G + nb*G + g]); /* var = E[X^2] - (E[X])^2 [G] */
        for(j = 0; j < group_size; j++){
          s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps)); /* s = 1/sqrt(var(X) + eps) [CP, CB] */
          b[g*group_size + j] = -1 * mean[np*NB*G + nb*G + g] * s[g*group_size + j]; /* b = -E[X]/sqrt(var(X) + eps) [CP, CB] */
        }
      }
      /* Apply the normalization elementwise. */
      for(cp = 0; cp < CP; cp++){ /* Size = 2*CP*HW*CB*4 + 2*CP*CB*4 */
        for(cb = 0; cb < CB; cb++){
          for(hw = 0; hw < HW; hw++){
            value = LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB);
            value = ((value * s[cp*CB + cb]) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + LIBXSMM_VLA_ACCESS(2, beta, cp, cb, CB); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
            LIBXSMM_VLA_ACCESS(5, out, np, cp, nb, hw, cb, CP, NB, HW, CB) = value;
          }
        }
      }
    } /* end loops */
  } /*End multithreading loop*/
}

/*
 * Scalar reference implementation of backward group normalization (the
 * counterpart of scaler_groupnorm_fwd_fp32); body continues below this chunk.
 * Per-np partial dgamma/dbeta go into dgamma_NP/dbeta_NP and are reduced at
 * the end of the function.
 */
void scaler_groupnorm_bwd_fp32(long NP, long CP, long NB, long HW, long CB, long G, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta, float eps) {
  int np, group_size;
  group_size = (CP*CB)/G;
  float scale = 1.0f / (CP * HW* CB);
  LIBXSMM_VLA_DECL(5, float, din, pdin, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(5, float, inp, pinp, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(5, float, dout, pdout, CP, NB, HW, CB);
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
  LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
  LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
#pragma omp parallel for
  for(np = 0; np < NP; np++){
    int j, nb, cp, cb, hw, g;
    LIBXSMM_ALIGNED(float a[CP*CB], 64);
    LIBXSMM_ALIGNED(float b[CP*CB], 64);
    LIBXSMM_ALIGNED(float c[CP*CB], 64);
    LIBXSMM_ALIGNED(float ds[CP*CB], 64);
    LIBXSMM_ALIGNED(float db[CP*CB], 64);
    for(j = 0; j < CP*CB; j++){
      dgamma_NP[np*CP*CB + j] = 0.0f;
      dbeta_NP[np*CP*CB + j] = 0.0f;
    }
    for (nb = 0; nb < NB; nb++) {
      for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
        for(j = 0; j < group_size; j++){
          a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G +
g] + eps)); b[g*group_size + j] = -a[g*group_size + j]*mean[np*NB*G + nb*G + g]; ds[g*group_size + j] = 0.0f; db[g*group_size + j] = 0.0f; } } for (cp = 0; cp < CP; cp++) { /* dgamma += (a * inp + b) * dout , dbeta += dout, ds += dout * gamma * inp, db += dout * gamma */ /* Size = 2*CP*HW*CB*4 */ for (cb = 0; cb < CB; cb++) { for (hw = 0; hw < HW; hw++){ dgamma_NP[np*CP*CB + cp*CB + cb] += (a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB); dbeta_NP[np*CP*CB + cp*CB + cb] += LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB); ds[cp*CB + cb] += LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) * LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB); db[cp*CB + cb] += LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB); } } } /* b = (db * mean[nb] - ds) * a * a * a * scale; */ /* c = -b * mean[nb] - db * a * scale; */ for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */ float gds = 0.0f; float gdb = 0.0f; for(j = 0; j < group_size; j++){ gds += ds[g*group_size + j]; /* Group ds and db calculation */ gdb += db[g*group_size + j]; } for(j = 0; j < group_size; j++){ b[g*group_size + j] = (gdb * mean[np*NB*G + nb*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale; c[g*group_size + j] = -b[g*group_size + j] * mean[np*NB*G + nb*G + g] - gdb * a[g*group_size + j] * scale; } } for (cp = 0; cp < CP; cp++) { /* din = dout * a * gamma + b * inp + c */ /* Size = 3*CP*HW*CB*4 */ for (cb = 0; cb < CB; cb++) { for (hw = 0; hw < HW; hw++){ LIBXSMM_VLA_ACCESS(5, din, np, cp, nb, hw, cb, CP, NB, HW, CB) = LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB) * a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + b[cp*CB + cb] * LIBXSMM_VLA_ACCESS(5, 
inp, np, cp, nb, hw, cb, CP, NB, HW, CB) + c[cp*CB + cb]; } } } } } int cp; #pragma omp parallel for for (cp = 0; cp < CP; cp++) { for (np=0; np < NP; np++ ) { int cb; for(cb = 0; cb < CB; cb++){ LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb]; LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb]; } } } } int main( int argc, char* argv[] ) { libxsmm_blasint my_eqn10, my_eqn11, my_eqn12, my_eqn13, my_eqn14, my_eqn15; libxsmm_matrix_eqn_function func10, func11, func12, func13, func14, func15; libxsmm_meltw_unary_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type; libxsmm_meltwfunction_unary reduce_rows_kernel, reduce_HW_kernel, reduce_groups_kernel; const float eps = FLT_EPSILON; libxsmm_blasint i, it, ld, tmp_ld, tmp_ld2; unsigned long long l_start, l_end; double l_total = 0, l_total2 = 0; double t_vec = 0, t_tpp = 0; libxsmm_matdiff_info norms_out; float *inp, *out, *dinp, *dout, *eqn_dinp, *eqn_dout, *dbeta, *eqn_dbeta, *dgamma, *eqn_dgamma, *eqn_out, *gamma, *beta, *cache_fl, *mean, *var, sum = 0.0; libxsmm_bfloat16 *bf16_inp, *bf16_out, *bf16_dinp, *bf16_dout, *bf16_eqn_dinp, *bf16_eqn_dout, *bf16_gamma, *bf16_beta, *bf16_eqn_out; int NP = 28; int CP = 2; int NB = 1; int HW = 784; int CB = 64; int G = 1; long num_HW_blocks = 16; int datatype_mode = 0; int iters = 100; libxsmm_datatype in_dt = LIBXSMM_DATATYPE_F32; libxsmm_datatype out_dt = LIBXSMM_DATATYPE_F32; if ( argc > 1 ) NP = atoi(argv[1]); if ( argc > 2 ) CP = atoi(argv[2]); if ( argc > 3 ) NB = atoi(argv[3]); if ( argc > 4 ) HW = atoi(argv[4]); if ( argc > 5 ) CB = atoi(argv[5]); if ( argc > 6 ) G = atoi(argv[6]); if ( argc > 7 ) num_HW_blocks = atoi(argv[7]); if ( argc > 8 ) datatype_mode = atoi(argv[8]); if ( argc > 9 ) iters = atoi(argv[9]); if (datatype_mode == 0) { in_dt = LIBXSMM_DATATYPE_F32; out_dt = LIBXSMM_DATATYPE_F32; } else if (datatype_mode == 1) { in_dt = LIBXSMM_DATATYPE_BF16; out_dt = 
LIBXSMM_DATATYPE_BF16; } else { printf("ERROR: Supporting only FP32 and BF16 precisions...\n"); } inp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152); out = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152); dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152); dout = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152); dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); eqn_dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152); eqn_dout = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152); eqn_dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); eqn_dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); gamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); beta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); mean = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*NB*G, 2097152); var = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*NB*G, 2097152); eqn_out = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152); cache_fl = (float*) libxsmm_aligned_malloc( sizeof(float)*1024*1024, 2097152); bf16_inp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152); bf16_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152); bf16_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152); bf16_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152); bf16_eqn_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152); bf16_eqn_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152); bf16_gamma = 
(libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152); bf16_beta = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152); bf16_eqn_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152); libxsmm_init(); libxsmm_matdiff_clear(&norms_out); /* Initializing arrays */ for ( i = 0; i < NP*CP*NB*HW*CB; ++i ) { inp[i] = (float)libxsmm_rng_f64(); out[i] = (float)libxsmm_rng_f64(); eqn_out[i] = out[i]; dinp[i] = (float)libxsmm_rng_f64(); dout[i] = (float)libxsmm_rng_f64(); eqn_dinp[i] = dinp[i]; eqn_dout[i] = dout[i]; libxsmm_rne_convert_fp32_bf16( &inp[i], &bf16_inp[i], 1 ); libxsmm_rne_convert_fp32_bf16( &out[i], &bf16_out[i], 1 ); libxsmm_rne_convert_fp32_bf16( &eqn_out[i], &bf16_eqn_out[i], 1 ); libxsmm_rne_convert_fp32_bf16( &dout[i], &bf16_dout[i], 1 ); libxsmm_rne_convert_fp32_bf16( &eqn_dout[i], &bf16_eqn_dout[i], 1 ); libxsmm_rne_convert_fp32_bf16( &dinp[i], &bf16_dinp[i], 1 ); libxsmm_rne_convert_fp32_bf16( &eqn_dinp[i], &bf16_eqn_dinp[i], 1 ); } for ( i = 0; i < CP*CB; ++i ) { gamma[i] = (float)libxsmm_rng_f64(); beta[i] = (float)libxsmm_rng_f64(); dbeta[i] = (float)libxsmm_rng_f64(); dgamma[i] = (float)libxsmm_rng_f64(); eqn_dbeta[i] = dbeta[i]; eqn_dgamma[i] = dgamma[i]; libxsmm_rne_convert_fp32_bf16( &gamma[i], &bf16_gamma[i], 1 ); libxsmm_rne_convert_fp32_bf16( &beta[i], &bf16_beta[i], 1 ); } for (i = 0; i < 1024 * 1024; i++ ) { cache_fl[i] = (float)libxsmm_rng_f64(); } libxsmm_blasint ldo = G; libxsmm_meltwfunction_unary all_zero_G_kernel = libxsmm_dispatch_meltw_unary(G, 1, NULL, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( all_zero_G_kernel == NULL) { fprintf( stderr, "JIT for initialization by unary all zero group copy kernel failed. 
Bailing...!\n"); exit(-1); } /* TPPs for reducing X and X2 in HW*/ ld = CB; tmp_ld = CB; unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD; jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; reduce_HW_kernel = libxsmm_dispatch_meltw_unary(CB, HW/num_HW_blocks, &ld, &tmp_ld, in_dt, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type); /* TPP for reducing groups */ libxsmm_blasint group_size = (CP*CB)/G; ld = group_size; /* group_size = (CP*CB)/G */ tmp_ld = 1; unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD; jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS; reduce_groups_kernel = libxsmm_dispatch_meltw_unary(group_size, 1, &ld, &tmp_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type); ld = CB; tmp_ld = 1; unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD; jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS; reduce_rows_kernel = libxsmm_dispatch_meltw_unary(CB, 1, &ld, &tmp_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type); /* TPP for scaling */ ld = CB; tmp_ld = 1; tmp_ld2 = 1; my_eqn10 = libxsmm_matrix_eqn_create(); /* y = (s*x + b)*gamma + beta */ libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* x = [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 1, 0, LIBXSMM_DATATYPE_F32 ); /* s = [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 2, 0, LIBXSMM_DATATYPE_F32 
); /* b = [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 3, 0, in_dt ); /* gamma = [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 4, 0, in_dt ); /* beta = [CB] */ func10 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, out_dt, my_eqn10 ); /* y = [HW, CB] */ /* Check correctness */ if (datatype_mode == 0) { scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps); tpp_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel); } else if (datatype_mode == 1) { scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps); tpp_groupnorm_fwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel); for ( i = 0; i < NP*CP*NB*HW*CB; ++i ) { /* out[i] = upconvert_bf16(bf16_out[i]); */ eqn_out[i] = upconvert_bf16(bf16_eqn_out[i]); } } /* compare */ printf("############################################\n"); if (datatype_mode == 0) { printf("# Correctness FP32 FWD Groupnorm - Output #\n"); } else { printf("# Correctness BF16 FWD Groupnorm - Output #\n"); } printf("############################################\n"); libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, NP*CP*NB*HW*CB, 1, out, eqn_out, 0, 0); printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error : %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); if (datatype_mode == 0) { for (i = 0; i < 1024 * 1024; i++ ) { sum += cache_fl[i]; } scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, 
mean, var, out, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Scaler time FWD = %.5g\n", ((double)(l_total))); for (i = 0; i < 1024 * 1024; i++ ) { sum += cache_fl[i] + (float)l_total; } tpp_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { tpp_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("TPP groupnorm time FWD = %.5g\n", ((double)(l_total2))); printf("Speedup FWD is %.5g\n", l_total/l_total2); } else if (datatype_mode == 1) { for (i = 0; i < 1024 * 1024; i++ ) { sum += cache_fl[i]; } scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Scaler FP32 groupnorm time FWD = %.5g\n", ((double)(l_total))); for (i = 0; i < 1024 * 1024; i++ ) { sum += cache_fl[i] + (float)l_total; } tpp_groupnorm_fwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { tpp_groupnorm_fwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, 
eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("TPP BF16 groupnorm time FWD = %.5g\n", ((double)(l_total2))); printf("Speedup FWD is %.5g\n", l_total/l_total2); } t_tpp = l_total2; t_vec = l_total; /* Group norm equations */ /* Create MatEq for bwd layernorm */ ld = CB; tmp_ld2 = 1; /* dgamma function */ my_eqn11 = libxsmm_matrix_eqn_create(); /* dgamma = ((inp *a + b) * dout) + dgamma */ libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* dgamma = ((inp *a + b) * dout) + dgamma */ libxsmm_matrix_eqn_push_back_unary_op(my_eqn11, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */ libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* ((inp *a + b) * dout) */ libxsmm_matrix_eqn_push_back_ternary_op( my_eqn11, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 4, 0, LIBXSMM_DATATYPE_F32 ); /* dgamma [CB] */ func11 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn11 ); /* dgamma [CB] */ /* dbeta function */ my_eqn12 = libxsmm_matrix_eqn_create(); /* dbeta [CB] = dout [HW, CB] + 
dbeta [CB] */ libxsmm_matrix_eqn_push_back_binary_op( my_eqn12, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* dbeta_tmp [HW, CB] */ libxsmm_matrix_eqn_push_back_unary_op(my_eqn12, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, 1, 1, 5, 0, LIBXSMM_DATATYPE_F32 ); /* dbeta [CB] */ func12 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn12 ); /* dbeta [CB] */ /* db new equation */ my_eqn13 = libxsmm_matrix_eqn_create(); /* db [CB] = (dout * gamma) [HW, CB] + db [CB]*/ libxsmm_matrix_eqn_push_back_binary_op(my_eqn13, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* db [CB] */ libxsmm_matrix_eqn_push_back_unary_op(my_eqn13, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */ libxsmm_matrix_eqn_push_back_binary_op( my_eqn13, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, 1, 1, 9, 0, LIBXSMM_DATATYPE_F32 ); /* db [CB] */ func13 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn13 ); /* db [CB] */ /* ds new equation */ my_eqn14 = libxsmm_matrix_eqn_create(); /* ds [CB] = ((dout * gamma) * inp) [HW, CB] + ds [CB] */ libxsmm_matrix_eqn_push_back_binary_op(my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* ds [CB] */ libxsmm_matrix_eqn_push_back_unary_op(my_eqn14, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, 
LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */ libxsmm_matrix_eqn_push_back_binary_op( my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_binary_op( my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1, LIBXSMM_DATATYPE_F32 ); /*(dout * gamma)*/ libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, 1, 1, 8, 0, LIBXSMM_DATATYPE_F32 ); /* ds [CB] */ func14 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn14 ); /* ds [CB] */ /* din equation */ my_eqn15 = libxsmm_matrix_eqn_create(); /* din = ((gamma * a) * dout) + (inp * b + c) */ libxsmm_matrix_eqn_push_back_ternary_op( my_eqn15, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_binary_op( my_eqn15, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_ternary_op( my_eqn15, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( 
my_eqn15, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 7, 0, LIBXSMM_DATATYPE_F32 ); /* c [CB] */ func15 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, in_dt, my_eqn15 ); /* din [HW, CB] */ if (datatype_mode == 0) { scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); tpp_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, eps); } else if (datatype_mode == 1) { scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); tpp_groupnorm_bwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, eps); for ( i = 0; i < NP*CP*NB*HW*CB; ++i ) { /* dinp[i] = upconvert_bf16(bf16_dinp[i]); */ eqn_dinp[i] = upconvert_bf16(bf16_eqn_dinp[i]); } } /* compare */ printf("############################################\n"); if (datatype_mode == 0) { printf("# Correctness FP32 BWD Groupnorm - Dinput #\n"); } else { printf("# Correctness BF16 BWD Groupnorm - Dinput #\n"); } printf("############################################\n"); libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, NP*CP*NB*HW*CB, 1, dinp, eqn_dinp, 0, 0); printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error : %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); printf("###########################################\n"); if (datatype_mode == 0) { printf("# Correctness FP32 BWD Groupnorm - Dbeta #\n"); } else { printf("# Correctness BF16 BWD Groupnorm - Dbeta #\n"); } 
printf("###########################################\n"); libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dbeta, eqn_dbeta, 0, 0); printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error : %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); printf("############################################\n"); if (datatype_mode == 0) { printf("# Correctness FP32 BWD Groupnorm - Dgamma #\n"); } else { printf("# Correctness BF16 BWD Groupnorm - Dgamma #\n"); } printf("############################################\n"); libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dgamma, eqn_dgamma, 0, 0); printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error : %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); if (datatype_mode == 0) { for (i = 0; i < 1024 * 1024; i++ ) { sum += cache_fl[i]; } scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Scaler groupnorm time BWD = %.5g\n", ((double)(l_total))); for (i = 0; i < 1024 * 1024; i++ ) { sum += cache_fl[i] + (float)l_total; } tpp_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, eps); l_start = 
libxsmm_timer_tick(); for (it = 0; it < iters; it++) { tpp_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, eps); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("TPP groupnorm time BWD = %.5g\n", ((double)(l_total2))); printf("Speedup BWD is %.5g\n", l_total/l_total2); } else if (datatype_mode == 1) { for (i = 0; i < 1024 * 1024; i++ ) { sum += cache_fl[i]; } scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Scaler FP32 groupnorm time BWD = %.5g\n", ((double)(l_total))); for (i = 0; i < 1024 * 1024; i++ ) { sum += cache_fl[i] + (float)l_total; } tpp_groupnorm_bwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_dinp, dgamma, dbeta, func11, func12, func13, func14, func15, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { tpp_groupnorm_bwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_dinp, dgamma, dbeta, func11, func12, func13, func14, func15, eps); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("TPP BF16 groupnorm time BWD = %.5g\n", ((double)(l_total2))); printf("Speedup BWD is %.5g\n", l_total/l_total2); } /* printf("Running sum is %.5f\n", sum); */ t_tpp += l_total2; t_vec += l_total; printf("\n\n=================================\n"); printf("Total Speedup via TPP Matrix equation is %.5g\n", t_vec/t_tpp); printf("=================================\n"); libxsmm_free(inp); libxsmm_free(out); libxsmm_free(dinp); libxsmm_free(dout); 
libxsmm_free(eqn_dinp); libxsmm_free(eqn_dout); libxsmm_free(bf16_dinp); libxsmm_free(bf16_dout); libxsmm_free(bf16_eqn_dinp); libxsmm_free(bf16_eqn_dout); libxsmm_free(dgamma); libxsmm_free(dbeta); libxsmm_free(eqn_dgamma); libxsmm_free(eqn_dbeta); libxsmm_free(mean); libxsmm_free(var); libxsmm_free(gamma); libxsmm_free(beta); libxsmm_free(eqn_out); libxsmm_free(bf16_inp); libxsmm_free(bf16_out); libxsmm_free(bf16_gamma); libxsmm_free(bf16_beta); libxsmm_free(bf16_eqn_out); libxsmm_free(cache_fl); return 0; }
benchmarking.c
/**
 * @brief Measure how long each available core takes to produce
 *        `target_number_of_points` points, and derive per-core work ratios.
 *
 * @param points_ratio             output array with one entry per core; entry i
 *                                 is core i's measured wall time divided by the
 *                                 total wall time over all cores (or a uniform
 *                                 1/N split when benchmarking is compiled out)
 * @param instance                 instance description; N_OF_CORES and the
 *                                 shared-state parameters are read from it
 * @param target_number_of_points  number of vOW iterations each core runs
 *
 * NOTE(review): when RUN_ACTUAL_BENCHMARKING is undefined the function ignores
 * target_number_of_points and just fills points_ratio with a uniform split.
 */
void run_benchmark(
    double *points_ratio,
    instance_t *instance,
    uint64_t target_number_of_points)
{
#ifdef RUN_ACTUAL_BENCHMARKING
    double total_time = 0.;
    double *benchmark;
    shared_state_t benchmark_S;
#ifdef STORE_IN_DATABASE
    db_settings_t db_settings = load_db_settings();
#endif

    // Init a state from an instance, and set the number of threads to 1
    // (each OpenMP thread below benchmarks a single-core configuration)
    benchmark_S = init_shared_state(instance
#ifdef STORE_IN_DATABASE
        , &db_settings
#endif
    );
    benchmark_S.N_OF_CORES = 1;

    // Stop at a certain number of distinguished points!
#if !(defined(VOW_SIKE) || defined(VOW_SIDH))
    prng_state_t prng_state;
    init_prng(&prng_state, benchmark_S.PRNG_SEED);
    sample_prng(&prng_state, benchmark_S.image.bytes, (unsigned long)benchmark_S.NBYTES_STATE);
    fix_overflow(&benchmark_S.image, benchmark_S.NBYTES_STATE, benchmark_S.NBITS_OVERFLOW);
#endif

    // Run benchmark
    // Explicitly disable dynamic teams so exactly N_OF_CORES threads run
    omp_set_dynamic(0);

    // Allocate memory for benchmark (one wall-time slot per core)
    benchmark = (double *)malloc(instance->N_OF_CORES * sizeof(double));
    if (benchmark == NULL)
    {
        fprintf(stderr, "error: could not allocate memory for the benchmarks");
        assert(0);
    }

    // run benchmark on each core
#pragma omp parallel num_threads(instance->N_OF_CORES)
    {
        int thread_id = omp_get_thread_num();
        bool success;
        private_state_t private_state = init_private_state(&benchmark_S);
        initialize_private_memory(&benchmark_S, &private_state);
        trip_t t = init_trip(private_state.NWORDS_STATE);

        double wall_time = omp_get_wtime();
        for (uint64_t i = 0; i < target_number_of_points; i++)
        {
            // Result intentionally discarded: only the timing matters here
            (void)vOW_one_iteration(&benchmark_S, &private_state, &t, &success, 1.);
        }
        wall_time = omp_get_wtime() - wall_time;

        // Save per-thread result; total is accumulated atomically since
        // all threads update the shared sum
        benchmark[thread_id] = wall_time;
#pragma omp atomic
        total_time += wall_time;

        // Free memory
        free_trip(&t);
        cleanup_private_memory(&private_state);
        free_private_state(&private_state);
    }

    // Send benchmark information to database and recover ratios
#ifdef STORE_IN_DATABASE
    fprintf(stderr, "error: rrprf_from_benchmark not implemented\n");
#else
    for (uint64_t i = 0; i < instance->N_OF_CORES; i++)
    {
        points_ratio[i] = benchmark[i] / total_time;
    }
#endif

    // Free benchmark_S without freeing the buffers
    free(benchmark);
    free_shared_state(&benchmark_S);
#else
    (void)target_number_of_points;
    // Benchmarking compiled out: assume all cores are equally fast
    for (uint64_t i = 0; i < instance->N_OF_CORES; i++)
    {
        points_ratio[i] = 1. / (double)instance->N_OF_CORES;
    }
#endif
}
rose_heat_serial_OpenMP.c
#include <omp.h> /* This code si contributed by Richard T. Evans at the Texas Advanced computing Center * The University of Texas at Austin * * To compile: icc -o heat heat_serial.c calc_up.c */ #include <stdio.h> #include <sys/time.h> #include "calc_up.h" int main() { int Nx; int Ny; int Nt; int t; int x; int y; Nx = 1000; Ny = 1000; Nt = 1000; double u[Nx][Ny]; double up[Nx][Ny]; struct timeval start; struct timeval end; float delta; // Boundary conditions for (x = 0; x < Nx; x++) for (y = 0; y < Ny; y++) { if (x == 0) u[x][y] = 1.0; else u[x][y] = 0.0; } gettimeofday(&start,0); //////////////////////////////////////////////////////////////////////// // Finite difference algorithm - iterate over time to reach steady state //////////////////////////////////////////////////////////////////////// for (t = 0; t < Nt; t++) { #pragma omp parallel default(none) shared(u,up,Nx,Ny) private(x,y) { #pragma omp for for (x = 1; x < (Nx - 1); x++) for (y = 1; y < (Ny - 1); y++) calc_up(x,y,Nx,Ny,u,up); } #pragma omp parallel default(none) shared(u,up,Nx,Ny) private(x,y) { #pragma omp for for (x = 1; x < (Nx - 1); x++) for (y = 1; y < (Ny - 1); y++) u[x][y] = up[x][y]; } } gettimeofday(&end,0); delta = (((((end.tv_sec - start.tv_sec) * 1000000u) + end.tv_usec) - start.tv_usec) / 1.e6); double sum = 0; for (y = 0; y < Ny; y++) { for (x = 0; x < Nx; x++) { sum += u[x][y]; } } printf("run time = %fs\n",delta); printf("sum of u = %f\n",sum); return 0; }
internal.c
void foo1();
void foo2();
void foo3();

/*
 * Runs foo1, foo2 and foo3 in sequence, each inside its own OpenMP
 * parallel region (every thread of a region calls the same task).
 * Behaviorally identical to writing the three regions out by hand.
 */
int main()
{
  /* Dispatch table: one entry per parallel region, executed in order. */
  void (*const tasks[])() = {foo1, foo2, foo3};
  int i;

  for (i = 0; i < 3; i++) {
#pragma omp parallel
    {
      tasks[i]();
    }
  }
}
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morpology is the the application of various kernels, of any size and even % shape, to a image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"

/*
  Other global definitions used by module.
*/

/* Minimum / maximum of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  return( x < y ? x : y);
}
static inline double MagickMax(const double x,const double y)
{
  return( x > y ? x : y);
}
/* Clamp 'assign' down/up against 'value' in place. */
#define Minimize(assign,value)  assign=MagickMin(assign,value)
#define Maximize(assign,value)  assign=MagickMax(assign,value)

/* Integer Factorial Function - for a Binomial kernel
   NOTE(review): the iterative product overflows size_t for n > 20 on
   64-bit targets; callers are expected to pass small kernel radii only. */
#if 1
static inline size_t fact(size_t n)
{
  size_t l,f;
  for(f=1, l=2; l <= n; f=f*l, l++);
  return(f);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif

/* Currently these are only internal to this module */
static void
  CalcKernelMetaData(KernelInfo *),
  ExpandMirrorKernelInfo(KernelInfo *),
  ExpandRotateKernelInfo(KernelInfo *, const double),
  RotateKernelInfo(KernelInfo *, double);

/* Quick function to find last kernel in a kernel list
   (walks the singly-linked 'next' chain; 'kernel' must be non-NULL). */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  while (kernel->next != (KernelInfo *) NULL)
    kernel=kernel->next;
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelInfo() takes the given string (generally supplied by the
%  user) and converts it into a Morphology/Convolution Kernel.  This allows
%  users to specify a kernel from a number of pre-defined kernels, or to fully
%  specify their own kernel for a specific Convolution or Morphology
%  Operation.
%
%  The kernel so generated can be any rectangular array of floating point
%  values (doubles) with the 'control point' or 'pixel being affected'
%  anywhere within that array of values.
%
%  Previously IM was restricted to a square of odd size using the exact
%  center as origin, this is no longer the case, and any rectangular kernel
%  with any value being declared the origin.  This in turn allows the use of
%  highly asymmetrical kernels.
%
%  The floating point values in the kernel can also include a special value
%  known as 'nan' or 'not a number' to indicate that this value is not part
%  of the kernel array.  This allows you to shape the kernel within its
%  rectangular area.  That is 'nan' values provide a 'mask' for the kernel
%  shape.  However at least one non-nan value must be provided for correct
%  working of a kernel.
%
%  The returned kernel should be freed using DestroyKernelInfo() when you
%  are finished with it.  Do not free this memory yourself.
%
%  Input kernel definition strings can consist of any of three types.
%
%    "name:args[[@><]"
%         Select from one of the built in kernels, using the name and
%         geometry arguments supplied.  See AcquireKernelBuiltIn()
%
%    "WxH[+X+Y][@><]:num, num, num ..."
%         a kernel of size W by H, with W*H floating point numbers following.
%         the 'center' can optionally be defined at +X+Y (such that +0+0
%         is top left corner).  If not defined the pixel in the center, for
%         odd sizes, or to the immediate top or left of center for even sizes
%         is automatically selected.
%
%    "num, num, num, num, ..."
%         list of floating point numbers defining an 'old style' odd sized
%         square kernel.  At least 9 values should be provided for a 3x3
%         square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
%         Values can be space or comma separated.  This is not recommended.
%
%  You can define a 'list of kernels' which can be used by some morphology
%  operators.  A list is defined as a semi-colon separated list of kernels.
%
%     " kernel ; kernel ; kernel ; "
%
%  Any extra ';' characters, at start, end or between kernel definitions are
%  simply ignored.
%
%  The special flags will expand a single kernel, into a list of rotated
%  kernels.  A '@' flag will expand a 3x3 kernel into a list of 45-degree
%  cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also exands using 90-degree rotates, but giving a 180-degree % reflected kernel before the +/- 90-degree rotations, which can be important % for Thinning operations. % % Note that 'name' kernels will start with an alphabetic character while the % new kernel specification has a ':' character in its specification string. % If neither is the case, it is assumed an old style of a simple list of % numbers generating a odd-sized square kernel has been given. % % The format of the AcquireKernal method is: % % KernelInfo *AcquireKernelInfo(const char *kernel_string) % % A description of each parameter follows: % % o kernel_string: the Morphology/Convolution kernel wanted. % */ /* This was separated so that it could be used as a separate ** array input handling function, such as for -color-matrix */ static KernelInfo *ParseKernelArray(const char *kernel_string) { KernelInfo *kernel; char token[MaxTextExtent]; const char *p, *end; register ssize_t i; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ MagickStatusType flags; GeometryInfo args; kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *)NULL) return(kernel); (void) ResetMagickMemory(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = UserDefinedKernel; kernel->next = (KernelInfo *) NULL; kernel->signature = MagickSignature; if (kernel_string == (const char *) NULL) return(kernel); /* find end of this specific kernel definition string */ end = strchr(kernel_string, ';'); if ( end == (char *) NULL ) end = strchr(kernel_string, '\0'); /* clear flags - for Expanding kernel lists thorugh rotations */ flags = NoValue; /* Has a ':' in argument - New user kernel specification FUTURE: this split on ':' could be done by StringToken() */ p = strchr(kernel_string, ':'); if ( p != (char *) NULL && p < end) { /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, kernel_string, (size_t) (p-kernel_string)); token[p-kernel_string] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); /* Size handling and checks of geometry settings */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 1.0; /* then width = 1 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ kernel->width = (size_t)args.rho; kernel->height = (size_t)args.sigma; /* Offset Handling and Checks */ if ( args.xi < 0.0 || args.psi < 0.0 ) return(DestroyKernelInfo(kernel)); kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi : (ssize_t) (kernel->width-1)/2; kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi : (ssize_t) (kernel->height-1)/2; if ( kernel->x >= (ssize_t) kernel->width || kernel->y >= (ssize_t) kernel->height ) return(DestroyKernelInfo(kernel)); p++; /* advance beyond the ':' */ } else { /* ELSE - Old old specification, forming odd-square kernel */ /* count up number of values given */ p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ for (i=0; p < end; i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); } /* set the size of the kernel - old sized square */ kernel->width = kernel->height= (size_t) sqrt((double) i+1.0); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ } /* Read in the kernel values from rest of input string argument */ kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); 
kernel->minimum=MagickMaximumValue; kernel->maximum=(-MagickMaximumValue); kernel->negative_range = kernel->positive_range = 0.0; for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); if ( LocaleCompare("nan",token) == 0 || LocaleCompare("-",token) == 0 ) { kernel->values[i] = nan; /* this value is not part of neighbourhood */ } else { kernel->values[i] = StringToDouble(token,(char **) NULL); ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } } /* sanity check -- no more values in kernel definition */ GetMagickToken(p,&p,token); if ( *token != '\0' && *token != ';' && *token != '\'' ) return(DestroyKernelInfo(kernel)); #if 0 /* this was the old method of handling a incomplete kernel */ if ( i < (ssize_t) (kernel->width*kernel->height) ) { Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); for ( ; i < (ssize_t) (kernel->width*kernel->height); i++) kernel->values[i]=0.0; } #else /* Number of values for kernel was not enough - Report Error */ if ( i < (ssize_t) (kernel->width*kernel->height) ) return(DestroyKernelInfo(kernel)); #endif /* check that we recieved at least one real (non-nan) value! 
*/ if (kernel->minimum == MagickMaximumValue) return(DestroyKernelInfo(kernel)); if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */ return(kernel); } static KernelInfo *ParseKernelName(const char *kernel_string) { char token[MaxTextExtent]; const char *p, *end; GeometryInfo args; KernelInfo *kernel; MagickStatusType flags; ssize_t type; /* Parse special 'named' kernel */ GetMagickToken(kernel_string,&p,token); type=ParseCommandOption(MagickKernelOptions,MagickFalse,token); if ( type < 0 || type == UserDefinedKernel ) return((KernelInfo *)NULL); /* not a valid named kernel */ while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';')) p++; end = strchr(p, ';'); /* end of this kernel defintion */ if ( end == (char *) NULL ) end = strchr(p, '\0'); /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, p, (size_t) (end-p)); token[end-p] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif /* special handling of missing values in input string */ switch( type ) { /* Shape Kernel Defaults */ case UnityKernel: if ( (flags & WidthValue) == 0 ) args.rho = 1.0; /* Default scale = 1.0, zero is valid */ break; case SquareKernel: case DiamondKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: if ( (flags & HeightValue) == 0 ) args.sigma = 1.0; /* Default scale = 1.0, zero is valid */ break; case RingKernel: if ( (flags & XValue) == 0 ) args.xi = 1.0; /* Default scale = 1.0, zero is valid */ break; case RectangleKernel: /* Rectangle - set size defaults */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 3; /* then width = 3 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ if ( (flags & XValue) == 0 ) /* center offset if not defined */ args.xi = (double)(((ssize_t)args.rho-1)/2); if ( (flags & YValue) == 0 ) args.psi = (double)(((ssize_t)args.sigma-1)/2); break; /* Distance Kernel Defaults */ case ChebyshevKernel: case ManhattanKernel: case OctagonalKernel: case EuclideanKernel: if ( (flags & HeightValue) == 0 ) /* no distance scale */ args.sigma = 100.0; /* default distance scaling */ else if ( (flags & AspectValue ) != 0 ) /* '!' 
flag */ args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */ else if ( (flags & PercentValue ) != 0 ) /* '%' flag */ args.sigma *= QuantumRange/100.0; /* percentage of color range */ break; default: break; } kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args); if ( kernel == (KernelInfo *) NULL ) return(kernel); /* global expand to rotated kernel list - only for single kernels */ if ( kernel->next == (KernelInfo *) NULL ) { if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 45.0); else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); } return(kernel); } MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string) { KernelInfo *kernel, *new_kernel; char token[MaxTextExtent]; const char *p; if (kernel_string == (const char *) NULL) return(ParseKernelArray(kernel_string)); p=kernel_string; kernel=NULL; while (GetMagickToken(p,NULL,token), *token != '\0') { /* ignore extra or multiple ';' kernel separators */ if (*token != ';') { /* tokens starting with alpha is a Named kernel */ if (isalpha((int) ((unsigned char) *token)) != 0) new_kernel=ParseKernelName(p); else /* otherwise a user defined kernel array */ new_kernel=ParseKernelArray(p); /* Error handling -- this is not proper error handling! 
*/ if (new_kernel == (KernelInfo *) NULL) { if (kernel != (KernelInfo *) NULL) kernel=DestroyKernelInfo(kernel); return((KernelInfo *) NULL); } /* initialise or append the kernel list */ if (kernel == (KernelInfo *) NULL) kernel=new_kernel; else LastKernelInfo(kernel)->next=new_kernel; } /* look for the next kernel in list */ p=strchr(p,';'); if (p == (char *) NULL) break; p++; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l B u i l t I n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelBuiltIn() returned one of the 'named' built-in types of % kernels used for special purposes such as gaussian blurring, skeleton % pruning, and edge distance determination. % % They take a KernelType, and a set of geometry style arguments, which were % typically decoded from a user supplied string, or from a more complex % Morphology Method that was requested. % % The format of the AcquireKernalBuiltIn method is: % % KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, % const GeometryInfo args) % % A description of each parameter follows: % % o type: the pre-defined type of kernel wanted % % o args: arguments defining or modifying the kernel % % Convolution Kernels % % Unity % The a No-Op or Scaling single element kernel. % % Gaussian:{radius},{sigma} % Generate a two-dimensional gaussian kernel, as used by -gaussian. % The sigma for the curve is required. The resulting kernel is % normalized, % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % NOTE: that the 'radius' is optional, but if provided can limit (clip) % the final size of the resulting kernel to a square 2*radius+1 in size. % The radius should be at least 2 times that of the sigma value, or % sever clipping and aliasing may result. 
If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters % % # Still to be implemented... % # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? 
% # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0, 2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... 
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
% % WARNING: The above was laid out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neither paper defines the kernels in a way that looks logical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a floating-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. % % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. 
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernels are not good general morphological kernels, but are used % more for highlighting and marking any single pixels in an image using % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximately 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels that fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeleton) % Two types of line ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-connected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeleton) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Find single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Traditional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a research paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve connectivity. % Many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with % the 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling an anti-aliased shape. % % See the 'Distance' Morphological Method, for information on how it is % applied. 
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One way % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse to reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can % travel in horizontal or vertical directions only. It is the % distance a 'Rook' in chess would have to travel, and results in % diamond like distances, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleaving of Manhattan and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances match those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flies' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without losing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *)NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) ResetMagickMemory(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; 
kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature = MagickSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1, sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, 
producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (< 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - generate a unity kernel */ { 
(void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ 
kernel=ParseKernelArray( "7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? 
*/ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +MagickSQ2; kernel->values[5] = kernel->values[7]= -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19"); if (kernel == (KernelInfo *) NULL) return(kernel); break; case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +MagickSQ2; kernel->values[7] = +MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +MagickSQ2; kernel->values[8] = -MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; 
kernel->values[2] = -MagickSQ2; kernel->values[6] = +MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) 
kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) 
kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>")); case 1: /* kernel for 4-connected line ends - no rotation */ kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; 
case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>")); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet rotate a non-square kernel */ /* But then we can't flip a 
non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* append the mirror versions too - no flip function yet */ 
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;"); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo( "ThinSE:41; ThinSE:42; ThinSE:43"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. 
*/ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel */ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } 
if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 
= MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
%  The format of the CloneKernelInfo method is:
%
%      KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*new_kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */

  /* FIX: clear the pointers borrowed from the source kernel BEFORE any
  ** allocation can fail.  Previously, if the values allocation below
  ** failed, DestroyKernelInfo(new_kernel) recursed into the SOURCE
  ** kernel's 'next' chain (still owned by the caller), freeing kernels
  ** that remain in use -- a use-after-free/double-free on the OOM path.
  */
  new_kernel->values=(double *) NULL;
  new_kernel->next=(KernelInfo *) NULL;

  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];

  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }

  return(new_kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
% % The format of the DestroyKernelInfo method is: % % KernelInfo *DestroyKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to be destroyed % */ MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel) { assert(kernel != (KernelInfo *) NULL); if (kernel->next != (KernelInfo *) NULL) kernel->next=DestroyKernelInfo(kernel->next); kernel->values=(double *) RelinquishAlignedMemory(kernel->values); kernel=(KernelInfo *) RelinquishMagickMemory(kernel); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d M i r r o r K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandMirrorKernelInfo() takes a single kernel, and expands it into a % sequence of 90-degree rotated kernels but providing a reflected 180 % rotatation, before the -/+ 90-degree rotations. % % This special rotation order produces a better, more symetrical thinning of % objects. % % The format of the ExpandMirrorKernelInfo method is: % % void ExpandMirrorKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. */ #if 0 static void FlopKernelInfo(KernelInfo *kernel) { /* Do a Flop by reversing each row. 
*/ size_t y; register ssize_t x,r; register double *k,t; for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width) for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--) t=k[x], k[x]=k[r], k[r]=t; kernel->x = kernel->width - kernel->x - 1; angle = fmod(angle+180.0, 360.0); } #endif static void ExpandMirrorKernelInfo(KernelInfo *kernel) { KernelInfo *clone, *last; last = kernel; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 180); /* flip */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 90); /* transpose */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 180); /* flop */ LastKernelInfo(last)->next = clone; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating % incrementally by the angle given, until the kernel repeats. % % WARNING: 45 degree rotations only works for 3x3 kernels. % While 90 degree roatations only works for linear and square kernels % % The format of the ExpandRotateKernelInfo method is: % % void ExpandRotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. 
*/

/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
     const KernelInfo *kernel2)
{
  register size_t
    i;

  /* check size and origin location */
  if (    kernel1->width != kernel2->width
       || kernel1->height != kernel2->height
       || kernel1->x != kernel2->x
       || kernel1->y != kernel2->y )
    return MagickFalse;

  /* check actual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for Nan equivalence: a NaN entry must be NaN in both kernels */
    if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
      return MagickFalse;
    /* Test actual values are equivalent */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }

  return MagickTrue;
}

static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *clone,
    *last;

  last = kernel;
  DisableMSCWarning(4127)
  while(1) {
  RestoreMSCWarning
    clone = CloneKernelInfo(last);
    /* FIX: the original dereferenced a NULL clone on allocation failure;
    ** stop expanding instead -- a partial expansion is still usable. */
    if (clone == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(clone, angle);
    if ( SameKernelInfo(kernel, clone) != MagickFalse )
      break;
    LastKernelInfo(last)->next = clone;
    last = clone;
  }
  /* kernel has repeated - junk the clone (guard the OOM break above,
  ** where there is no clone to destroy) */
  if (clone != (KernelInfo *) NULL)
    clone = DestroyKernelInfo(clone);

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c K e r n e l M e t a D a t a                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel
%  only, using the kernel values.  This should only be used if it is not
%  possible to calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels).  This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
%  This is an internal function and not expected to be useful outside this
%  module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    i;

  double
    value;       /* current kernel entry, after epsilon clipping */

  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
    {
      value = kernel->values[i];
      /* Snap near-zero entries to exactly zero. */
      if ( fabs(value) < MagickEpsilon )
        kernel->values[i] = value = 0.0;
      /* Accumulate negative and positive ranges separately. */
      if ( value < 0 )
        kernel->negative_range += value;
      else
        kernel->positive_range += value;
      Minimize(kernel->minimum, value);
      Maximize(kernel->maximum, value);
    }

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y   A p p l y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but
%  without any user controls.  This allows internal programs to use this
%  function, to actually perform a specific task without possible interference
%  by any API user supplied settings.
%
%  It is MorphologyImage()'s task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ChannelType channel, const ssize_t iterations,
%        const KernelInfo *kernel, const CompositeMethod compose,
%        const double bias, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o channel: the channels to which the operations are applied
%               The channel 'sync' flag determines if 'alpha weighting' is
%               applied for convolution style operations.
%
%    o iterations: apply the operation this many times (or no change).
%                  A value of -1 means loop until no change found.
%                  How this is applied may depend on the morphology method.
%                  Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%               If 'UndefinedCompositeOp' use default for the Morphology
%               method.
%               If 'NoCompositeOp' force image to be re-iterated by each
%               kernel.
%               Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/ static ssize_t MorphologyPrimitive(const Image *image, Image *result_image, const MorphologyMethod method, const ChannelType channel, const KernelInfo *kernel,const double bias,ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *p_view, *q_view; register ssize_t i; size_t *changes, changed, virt_width; ssize_t y, offx, offy; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(result_image != (Image *) NULL); assert(result_image->signature == MagickSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); status=MagickTrue; progress=0; p_view=AcquireVirtualCacheView(image,exception); q_view=AcquireAuthenticCacheView(result_image,exception); virt_width=image->columns+kernel->width-1; /* Some methods (including convolve) needs use a reflected kernel. * Adjust 'origin' offsets to loop though kernel as a reflection. 
*/ offx = kernel->x; offy = kernel->y; switch(method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: /* kernel needs to used with reflection about origin */ offx = (ssize_t) kernel->width-offx-1; offy = (ssize_t) kernel->height-offy-1; break; case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: /* kernel is used as is, without reflection */ break; default: assert("Not a Primitive Morphology Method" != (char *) NULL); break; } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) changes[i]=0; if ( method == ConvolveMorphology && kernel->width == 1 ) { /* Special handling (for speed) of vertical (blur) kernels. ** This performs its handling in columns rather than in rows. 
** This is only done for convolve as it is the only method that ** generates very large 1-D vertical kernels (such as a 'BlurKernel') ** ** Timing tests (on single CPU laptop) ** Using a vertical 1-d Blue with normal row-by-row (below) ** time convert logo: -morphology Convolve Blur:0x10+90 null: ** 0.807u ** Using this column method ** time convert logo: -morphology Convolve Blur:0x10+90 null: ** 0.620u ** ** Anthony Thyssen, 14 June 2010 */ register ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,result_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const PixelPacket *restrict p; register const IndexPacket *restrict p_indexes; register PixelPacket *restrict q; register IndexPacket *restrict q_indexes; register ssize_t y; ssize_t r; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1, exception); q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } p_indexes=GetCacheViewVirtualIndexQueue(p_view); q_indexes=GetCacheViewAuthenticIndexQueue(q_view); /* offset to origin in 'p'. 
while 'q' points to it directly */ r = offy; for (y=0; y < (ssize_t) image->rows; y++) { DoublePixelPacket result; register ssize_t v; register const double *restrict k; register const PixelPacket *restrict k_pixels; register const IndexPacket *restrict k_indexes; /* Copy input image to the output image for unused channels * This removes need for 'cloning' a new image every iteration */ *q = p[r]; if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+r)); /* Set the bias of the weighted average output */ result.red = result.green = result.blue = result.opacity = result.index = bias; /* Weighted Average of pixels using reflected kernel ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. */ k = &kernel->values[ kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; if ( ((channel & SyncChannels) == 0 ) || (image->matte == MagickFalse) ) { /* No 'Sync' involved. 
** Convolution is simple greyscale channel operation */ for (v=0; v < (ssize_t) kernel->height; v++) { if ( IsNaN(*k) ) continue; result.red += (*k)*GetPixelRed(k_pixels); result.green += (*k)*GetPixelGreen(k_pixels); result.blue += (*k)*GetPixelBlue(k_pixels); result.opacity += (*k)*GetPixelOpacity(k_pixels); if ( image->colorspace == CMYKColorspace) result.index += (*k)*(*k_indexes); k--; k_pixels++; k_indexes++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(result.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+y,ClampToQuantum(result.index)); } else { /* Channel 'Sync' Flag, and Alpha Channel enabled. ** Weight the color channels with Alpha Channel so that ** transparent pixels are not part of the results. 
*/ double gamma; /* divisor, sum of color alpha weighting */ MagickRealType alpha; /* alpha weighting for colors : alpha */ size_t count; /* alpha valus collected, number kernel values */ count=0; gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { if ( IsNaN(*k) ) continue; alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels)); count++; /* number of alpha values collected */ alpha*=(*k); /* include kernel weighting now */ gamma += alpha; /* normalize alpha weights only */ result.red += alpha*GetPixelRed(k_pixels); result.green += alpha*GetPixelGreen(k_pixels); result.blue += alpha*GetPixelBlue(k_pixels); result.opacity += (*k)*GetPixelOpacity(k_pixels); if ( image->colorspace == CMYKColorspace) result.index += alpha*(*k_indexes); k--; k_pixels++; k_indexes++; } /* Sync'ed channels, all channels are modified */ gamma=PerceptibleReciprocal(gamma); gamma*=(double) kernel->height/count; SetPixelRed(q,ClampToQuantum(gamma*result.red)); SetPixelGreen(q,ClampToQuantum(gamma*result.green)); SetPixelBlue(q,ClampToQuantum(gamma*result.blue)); SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index)); } /* Count up changed pixels */ if ( ( p[r].red != GetPixelRed(q)) || ( p[r].green != GetPixelGreen(q)) || ( p[r].blue != GetPixelBlue(q)) || ( p[r].opacity != GetPixelOpacity(q)) || ( image->colorspace == CMYKColorspace && GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+y) ) ) changes[id]++; p++; q++; } /* y */ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphologyPrimitive) #endif proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* x */ result_image->type=image->type; 
q_view=DestroyCacheView(q_view); p_view=DestroyCacheView(p_view); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) changed+=changes[i]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* ** Normal handling of horizontal or rectangular kernels (row by row) */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,result_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const PixelPacket *restrict p; register const IndexPacket *restrict p_indexes; register PixelPacket *restrict q; register IndexPacket *restrict q_indexes; register ssize_t x; size_t r; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width, kernel->height, exception); q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } p_indexes=GetCacheViewVirtualIndexQueue(p_view); q_indexes=GetCacheViewAuthenticIndexQueue(q_view); /* offset to origin in 'p'. 
while 'q' points to it directly */ r = virt_width*offy + offx; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t v; register ssize_t u; register const double *restrict k; register const PixelPacket *restrict k_pixels; register const IndexPacket *restrict k_indexes; DoublePixelPacket result, min, max; /* Copy input image to the output image for unused channels * This removes need for 'cloning' a new image every iteration */ *q = p[r]; if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+r)); /* Defaults */ min.red = min.green = min.blue = min.opacity = min.index = (double) QuantumRange; max.red = max.green = max.blue = max.opacity = max.index = 0.0; /* default result is the original pixel value */ result.red = (double) p[r].red; result.green = (double) p[r].green; result.blue = (double) p[r].blue; result.opacity = QuantumRange - (double) p[r].opacity; result.index = 0.0; if ( image->colorspace == CMYKColorspace) result.index = (double) GetPixelIndex(p_indexes+r); switch (method) { case ConvolveMorphology: /* Set the bias of the weighted average output */ result.red = result.green = result.blue = result.opacity = result.index = bias; break; case DilateIntensityMorphology: case ErodeIntensityMorphology: /* use a boolean flag indicating when first match found */ result.red = 0.0; /* result is not used otherwise */ break; default: break; } switch ( method ) { case ConvolveMorphology: /* Weighted Average of pixels using reflected kernel ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. ** ** Correlation is actually the same as this but without reflecting ** the kernel, and thus 'lower-level' that Convolution. However ** as Convolution is the more common method used, and it does not ** really cost us much in terms of processing to use a reflected ** kernel, so it is Convolution that is implemented. 
** ** Correlation will have its kernel reflected before calling ** this function to do a Convolve. ** ** For more details of Correlation vs Convolution see ** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; if ( ((channel & SyncChannels) == 0 ) || (image->matte == MagickFalse) ) { /* No 'Sync' involved. ** Convolution is simple greyscale channel operation */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; result.red += (*k)*k_pixels[u].red; result.green += (*k)*k_pixels[u].green; result.blue += (*k)*k_pixels[u].blue; result.opacity += (*k)*k_pixels[u].opacity; if ( image->colorspace == CMYKColorspace) result.index += (*k)*GetPixelIndex(k_indexes+u); } k_pixels += virt_width; k_indexes += virt_width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum((MagickRealType) result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); } else { /* Channel 'Sync' Flag, and Alpha Channel enabled. ** Weight the color channels with Alpha Channel so that ** transparent pixels are not part of the results. 
*/ double alpha, /* alpha weighting for colors : alpha */ gamma; /* divisor, sum of color alpha weighting */ size_t count; /* alpha valus collected, number kernel values */ count=0; gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity); count++; /* number of alpha values collected */ alpha*=(*k); /* include kernel weighting now */ gamma += alpha; /* normalize alpha weights only */ result.red += alpha*k_pixels[u].red; result.green += alpha*k_pixels[u].green; result.blue += alpha*k_pixels[u].blue; result.opacity += (*k)*k_pixels[u].opacity; if ( image->colorspace == CMYKColorspace) result.index+=alpha*GetPixelIndex(k_indexes+u); } k_pixels += virt_width; k_indexes += virt_width; } /* Sync'ed channels, all channels are modified */ gamma=PerceptibleReciprocal(gamma); gamma*=(double) kernel->height*kernel->width/count; SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red))); SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green))); SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue))); SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma* result.index))); } break; case ErodeMorphology: /* Minimum Value within kernel neighbourhood ** ** NOTE that the kernel is not reflected for this operation! ** ** NOTE: in normal Greyscale Morphology, the kernel value should ** be added to the real value, this is currently not done, due to ** the nature of the boolean kernels being used. 
*/ k = kernel->values; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; Minimize(min.red, (double) k_pixels[u].red); Minimize(min.green, (double) k_pixels[u].green); Minimize(min.blue, (double) k_pixels[u].blue); Minimize(min.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(min.index,(double) GetPixelIndex( k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case DilateMorphology: /* Maximum Value within kernel neighbourhood ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. ** ** NOTE: in normal Greyscale Morphology, the kernel value should ** be added to the real value, this is currently not done, due to ** the nature of the boolean kernels being used. ** */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; Maximize(max.red, (double) k_pixels[u].red); Maximize(max.green, (double) k_pixels[u].green); Maximize(max.blue, (double) k_pixels[u].blue); Maximize(max.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Maximize(max.index, (double) GetPixelIndex( k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: /* Minimum of Foreground Pixel minus Maxumum of Background Pixels ** ** NOTE that the kernel is not reflected for this operation, ** and consists of both foreground and background pixel ** neighbourhoods, 0.0 for background, and 1.0 for foreground ** with either Nan or 0.5 values for don't care. 
** ** Note that this will never produce a meaningless negative ** result. Such results can cause Thinning/Thicken to not work ** correctly when used against a greyscale image. */ k = kernel->values; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) ) continue; if ( (*k) > 0.7 ) { /* minimim of foreground pixels */ Minimize(min.red, (double) k_pixels[u].red); Minimize(min.green, (double) k_pixels[u].green); Minimize(min.blue, (double) k_pixels[u].blue); Minimize(min.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(min.index,(double) GetPixelIndex( k_indexes+u)); } else if ( (*k) < 0.3 ) { /* maximum of background pixels */ Maximize(max.red, (double) k_pixels[u].red); Maximize(max.green, (double) k_pixels[u].green); Maximize(max.blue, (double) k_pixels[u].blue); Maximize(max.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Maximize(max.index, (double) GetPixelIndex( k_indexes+u)); } } k_pixels += virt_width; k_indexes += virt_width; } /* Pattern Match if difference is positive */ min.red -= max.red; Maximize( min.red, 0.0 ); min.green -= max.green; Maximize( min.green, 0.0 ); min.blue -= max.blue; Maximize( min.blue, 0.0 ); min.opacity -= max.opacity; Maximize( min.opacity, 0.0 ); min.index -= max.index; Maximize( min.index, 0.0 ); break; case ErodeIntensityMorphology: /* Select Pixel with Minimum Intensity within kernel neighbourhood ** ** WARNING: the intensity test fails for CMYK and does not ** take into account the moderating effect of the alpha channel ** on the intensity. ** ** NOTE that the kernel is not reflected for this operation! 
*/ k = kernel->values; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; if ( result.red == 0.0 || GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) { /* copy the whole pixel - no channel selection */ *q = k_pixels[u]; if ( result.red > 0.0 ) changes[id]++; result.red = 1.0; } } k_pixels += virt_width; k_indexes += virt_width; } break; case DilateIntensityMorphology: /* Select Pixel with Maximum Intensity within kernel neighbourhood ** ** WARNING: the intensity test fails for CMYK and does not ** take into account the moderating effect of the alpha channel ** on the intensity (yet). ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */ if ( result.red == 0.0 || GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) { /* copy the whole pixel - no channel selection */ *q = k_pixels[u]; if ( result.red > 0.0 ) changes[id]++; result.red = 1.0; } } k_pixels += virt_width; k_indexes += virt_width; } break; case IterativeDistanceMorphology: /* Work out an iterative distance from black edge of a white image ** shape. Essentually white values are decreased to the smallest ** 'distance from edge' it can find. ** ** It works by adding kernel values to the neighbourhood, and and ** select the minimum value found. The kernel is rotated before ** use, so kernel distances match resulting distances, when a user ** provided asymmetric kernel is applied. ** ** ** This code is almost identical to True GrayScale Morphology But ** not quite. 
** ** GreyDilate Kernel values added, maximum value found Kernel is ** rotated before use. ** ** GrayErode: Kernel values subtracted and minimum value found No ** kernel rotation used. ** ** Note the the Iterative Distance method is essentially a ** GrayErode, but with negative kernel values, and kernel ** rotation applied. */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; Minimize(result.red, (*k)+k_pixels[u].red); Minimize(result.green, (*k)+k_pixels[u].green); Minimize(result.blue, (*k)+k_pixels[u].blue); Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(result.index,(*k)+GetPixelIndex( k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case UndefinedMorphology: default: break; /* Do nothing */ } /* Final mathematics of results (combine with original image?) ** ** NOTE: Difference Morphology operators Edge* and *Hat could also ** be done here but works better with iteration as a image difference ** in the controlling function (below). Thicken and Thinning however ** should be done here so thay can be iterated correctly. 
*/ switch ( method ) { case HitAndMissMorphology: case ErodeMorphology: result = min; /* minimum of neighbourhood */ break; case DilateMorphology: result = max; /* maximum of neighbourhood */ break; case ThinningMorphology: /* subtract pattern match from original */ result.red -= min.red; result.green -= min.green; result.blue -= min.blue; result.opacity -= min.opacity; result.index -= min.index; break; case ThickenMorphology: /* Add the pattern matchs to the original */ result.red += min.red; result.green += min.green; result.blue += min.blue; result.opacity += min.opacity; result.index += min.index; break; default: /* result directly calculated or assigned */ break; } /* Assign the resulting pixel values - Clamping Result */ switch ( method ) { case UndefinedMorphology: case ConvolveMorphology: case DilateIntensityMorphology: case ErodeIntensityMorphology: break; /* full pixel was directly assigned - not a channel method */ default: if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(result.blue)); if ((channel & OpacityChannel) != 0 && image->matte != MagickFalse ) SetPixelAlpha(q,ClampToQuantum(result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); break; } /* Count up changed pixels */ if ( ( p[r].red != GetPixelRed(q) ) || ( p[r].green != GetPixelGreen(q) ) || ( p[r].blue != GetPixelBlue(q) ) || ( p[r].opacity != GetPixelOpacity(q) ) || ( image->colorspace == CMYKColorspace && GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) ) changes[id]++; p++; q++; } /* x */ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp 
critical (MagickCore_MorphologyPrimitive)
#endif
        proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  /* Sum the per-thread change counters before releasing the array. */
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  /* Return total pixels changed, or -1 if any row failed. */
  return(status ? (ssize_t)changed : -1);
}

/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
**
** Arguments:
**   image     - image modified in place (read via a virtual view, written
**               back through an authentic view of the SAME image)
**   method    - only DistanceMorphology and VoronoiMorphology are handled;
**               any other method trips the assert below
**   channel   - channels to update (ignored for Voronoi, which assigns the
**               whole pixel via SetPixelPacket)
**   kernel    - distance kernel; applied in reflected form (see offx/offy)
**   exception - error/exception reporting
**
** Returns the number of pixels changed, or -1 on error.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CacheView
    *auth_view,
    *virt_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y, offx, offy;

  size_t
    changed, virt_width;

  status=MagickTrue;
  changed=0;
  progress=0;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);

  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case DistanceMorphology:
    case VoronoiMorphology:
      /* kernel needs to be used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
#if 0
    case ?????Morphology:
      /* kernel is used as is, without reflection */
      break;
#endif
    default:
      /* Unsupported method: the string is always non-NULL so this assert
      ** fires unconditionally, flagging a programming error. */
      assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
      break;
  }

  /* DO NOT THREAD THIS CODE! rows depend on previously written rows */
  /* two views into same image (virtual, and actual) */
  virt_view=AcquireVirtualCacheView(image,exception);
  auth_view=AcquireAuthenticCacheView(image,exception);
  virt_width=image->columns+kernel->width-1;

  /* Pass 1: top-down, left-to-right. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register const IndexPacket
      *restrict p_indexes;

    register PixelPacket
      *restrict q;

    register IndexPacket
      *restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only top half of kernel is processed as we do a single pass downward
    ** through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      break;
    p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width,
      (size_t) offy+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* offset to origin in 'p', while 'q' points to it directly */
    r = (ssize_t) virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict k_pixels;

      register const IndexPacket
        *restrict k_indexes;

      MagickPixelPacket
        result;

      /* Starting Defaults: seed with the current (output) pixel value.
      ** For Distance the opacity is flipped so larger = more opaque. */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
            /* Add kernel Value and select the minimum value found. */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes;
            for (v=0; v <= (ssize_t) offy; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) ) continue;
                Minimize(result.red,     (*k)+k_pixels[u].red);
                Minimize(result.green,   (*k)+k_pixels[u].green);
                Minimize(result.blue,    (*k)+k_pixels[u].blue);
                Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            /* repeat with the just-processed pixels of this row (already
            ** written into 'q' to the left of the current pixel) */
            k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
            k_pixels = q-offx;
            k_indexes = q_indexes-offx;
            for (u=0; u < (ssize_t) offx; u++, k--) {
              if ( x+u-offx < 0 ) continue;  /* off the edge! */
              if ( IsNaN(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            break;
        case VoronoiMorphology:
            /* Apply Distance to 'Matte' channel, while copying the color
            ** values of the closest pixel.
            **
            ** This is experimental, and really the 'alpha' component should
            ** be a completely separate 'masking' channel so that alpha can
            ** also be used as part of the results.
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes;
            for (v=0; v <= (ssize_t) offy; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) ) continue;
                if( result.opacity > (*k)+k_pixels[u].opacity )
                  {
                    /* closer pixel found: take its full color, new distance */
                    SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                      &result);
                    result.opacity += *k;
                  }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            /* repeat with the just-processed pixels of this row */
            k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
            k_pixels = q-offx;
            k_indexes = q_indexes-offx;
            for (u=0; u < (ssize_t) offx; u++, k--) {
              if ( x+u-offx < 0 ) continue;  /* off the edge! */
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            break;
        default:
          /* result directly calculated or assigned */
          break;
      }

      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          /* whole pixel (color + distance-in-alpha) is assigned */
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }

      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( p[r].opacity != GetPixelOpacity(q) )
          || ( image->colorspace == CMYKColorspace &&
               GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
        changed++;  /* The pixel was changed in some way! */

      p++;  /* increment pixel buffers */
      q++;
    } /* x */

    /* Sync is required before the next row: it reads these results. */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
                == MagickFalse )
        status=MagickFalse;
  } /* y */

  /* Pass 2: the reversed (bottom-up, right-to-left) pass through the image */
  for (y=(ssize_t)image->rows-1; y >= 0; y--)
  {
    register const PixelPacket
      *restrict p;

    register const IndexPacket
      *restrict p_indexes;

    register PixelPacket
      *restrict q;

    register IndexPacket
      *restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    if (status == MagickFalse)
      break;
    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the bottom half of the kernel will be processed as we move
    ** up the image.
    */
    p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width,
      (size_t) kernel->y+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* adjust positions to end of row (we scan right-to-left) */
    p += image->columns-1;
    q += image->columns-1;

    /* offset to origin in 'p', while 'q' points to it directly */
    r = offx;

    for (x=(ssize_t)image->columns-1; x >= 0; x--)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict k_pixels;

      register const IndexPacket
        *restrict k_indexes;

      MagickPixelPacket
        result;

      /* Default - previously modified pixel (result of pass 1) */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
            /* Add kernel Value and select the minimum value found. */
            k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
            k_pixels = p;
            k_indexes = p_indexes;
            for (v=offy; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) ) continue;
                Minimize(result.red,     (*k)+k_pixels[u].red);
                Minimize(result.green,   (*k)+k_pixels[u].green);
                Minimize(result.blue,    (*k)+k_pixels[u].blue);
                Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            /* repeat with the just-processed pixels of this row (to the
            ** right of the current pixel in this reversed pass) */
            k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
            k_pixels = q-offx;
            k_indexes = q_indexes-offx;
            for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
              if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
              if ( IsNaN(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            break;
        case VoronoiMorphology:
            /* Apply Distance to 'Matte' channel, copying the closest color.
            **
            ** This is experimental, and really the 'alpha' component should
            ** be a completely separate 'masking' channel.
            */
            k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
            k_pixels = p;
            k_indexes = p_indexes;
            for (v=offy; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) ) continue;
                if( result.opacity > (*k)+k_pixels[u].opacity )
                  {
                    SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                      &result);
                    result.opacity += *k;
                  }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            /* repeat with the just-processed pixels of this row */
            k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
            k_pixels = q-offx;
            k_indexes = q_indexes-offx;
            for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
              if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            break;
        default:
          /* result directly calculated or assigned */
          break;
      }

      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }

      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( p[r].opacity != GetPixelOpacity(q) )
          || ( image->colorspace == CMYKColorspace &&
               GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
        changed++;  /* The pixel was changed in some way! */

      p--;  /* go backward through pixel buffers */
      q--;
    } /* x */

    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
                == MagickFalse )
        status=MagickFalse;
  } /* y */

  auth_view=DestroyCacheView(auth_view);
  virt_view=DestroyCacheView(virt_view);
  /* Return total pixels changed over both passes, or -1 on error. */
  return(status ? (ssize_t) changed : -1);
}

/* Apply a Morphology by calling one of the above low level primitive
** application functions.  This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that is based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations, const KernelInfo *kernel,
  const CompositeOperator compose, const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function?
*/ verbose; /* verbose output of results */ size_t method_loop, /* Loop 1: number of compound method iterations (norm 1) */ method_limit, /* maximum number of compound method iterations */ kernel_number, /* Loop 2: the kernel number being applied */ stage_loop, /* Loop 3: primitive loop for compound morphology */ stage_limit, /* how many primitives are in this compound */ kernel_loop, /* Loop 4: iterate the kernel over image */ kernel_limit, /* number of times to iterate kernel */ count, /* total count of primitive steps applied */ kernel_changed, /* total count of changed using iterated kernel */ method_changed; /* total count of changed over method iteration */ ssize_t changed; /* number pixels changed by last primitive operation */ char v_info[MaxTextExtent]; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); count = 0; /* number of low-level morphology primitives performed */ if ( iterations == 0 ) return((Image *)NULL); /* null operation - nothing to do! */ kernel_limit = (size_t) iterations; if ( iterations < 0 ) /* negative interations = infinite (well alomst) */ kernel_limit = image->columns>image->rows ? 
image->columns : image->rows; verbose = IsMagickTrue(GetImageArtifact(image,"debug")); /* initialise for cleanup */ curr_image = (Image *) image; curr_compose = image->compose; (void) curr_compose; work_image = save_image = rslt_image = (Image *) NULL; reflected_kernel = (KernelInfo *) NULL; /* Initialize specific methods * + which loop should use the given iteratations * + how many primitives make up the compound morphology * + multi-kernel compose method to use (by default) */ method_limit = 1; /* just do method once, unless otherwise set */ stage_limit = 1; /* assume method is not a compound */ special = MagickFalse; /* assume it is NOT a direct modify primitive */ rslt_compose = compose; /* and we are composing multi-kernels as given */ switch( method ) { case SmoothMorphology: /* 4 primitive compound morphology */ stage_limit = 4; break; case OpenMorphology: /* 2 primitive compound morphology */ case OpenIntensityMorphology: case TopHatMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case EdgeMorphology: stage_limit = 2; break; case HitAndMissMorphology: rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */ /* FALL THUR */ case ThinningMorphology: case ThickenMorphology: method_limit = kernel_limit; /* iterate the whole method */ kernel_limit = 1; /* do not do kernel iteration */ break; case DistanceMorphology: case VoronoiMorphology: special = MagickTrue; /* use special direct primative */ break; default: break; } /* Apply special methods with special requirments ** For example, single run only, or post-processing requirements */ if ( special != MagickFalse ) { rslt_image=CloneImage(image,0,0,MagickTrue,exception); if (rslt_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse) { InheritException(exception,&rslt_image->exception); goto error_cleanup; } changed = MorphologyPrimitiveDirect(rslt_image, method, channel, kernel, exception); if ( 
verbose != MagickFalse ) (void) (void) FormatLocaleFile(stderr, "%s:%.20g.%.20g #%.20g => Changed %.20g\n", CommandOptionToMnemonic(MagickMorphologyOptions, method), 1.0,0.0,1.0, (double) changed); if ( changed < 0 ) goto error_cleanup; if ( method == VoronoiMorphology ) { /* Preserve the alpha channel of input image - but turned off */ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel); (void) CompositeImageChannel(rslt_image, DefaultChannels, CopyOpacityCompositeOp, image, 0, 0); (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel); } goto exit_cleanup; } /* Handle user (caller) specified multi-kernel composition method */ if ( compose != UndefinedCompositeOp ) rslt_compose = compose; /* override default composition for method */ if ( rslt_compose == UndefinedCompositeOp ) rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */ /* Some methods require a reflected kernel to use with primitives. * Create the reflected kernel for those methods. */ switch ( method ) { case CorrelateMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case SmoothMorphology: reflected_kernel = CloneKernelInfo(kernel); if (reflected_kernel == (KernelInfo *) NULL) goto error_cleanup; RotateKernelInfo(reflected_kernel,180); break; default: break; } /* Loops around more primitive morpholgy methods ** erose, dilate, open, close, smooth, edge, etc... 
*/ /* Loop 1: iterate the compound method */ method_loop = 0; method_changed = 1; while ( method_loop < method_limit && method_changed > 0 ) { method_loop++; method_changed = 0; /* Loop 2: iterate over each kernel in a multi-kernel list */ norm_kernel = (KernelInfo *) kernel; this_kernel = (KernelInfo *) kernel; rflt_kernel = reflected_kernel; kernel_number = 0; while ( norm_kernel != NULL ) { /* Loop 3: Compound Morphology Staging - Select Primative to apply */ stage_loop = 0; /* the compound morphology stage number */ while ( stage_loop < stage_limit ) { stage_loop++; /* The stage of the compound morphology */ /* Select primitive morphology for this stage of compound method */ this_kernel = norm_kernel; /* default use unreflected kernel */ primitive = method; /* Assume method is a primitive */ switch( method ) { case ErodeMorphology: /* just erode */ case EdgeInMorphology: /* erode and image difference */ primitive = ErodeMorphology; break; case DilateMorphology: /* just dilate */ case EdgeOutMorphology: /* dilate and image difference */ primitive = DilateMorphology; break; case OpenMorphology: /* erode then dialate */ case TopHatMorphology: /* open and image difference */ primitive = ErodeMorphology; if ( stage_loop == 2 ) primitive = DilateMorphology; break; case OpenIntensityMorphology: primitive = ErodeIntensityMorphology; if ( stage_loop == 2 ) primitive = DilateIntensityMorphology; break; case CloseMorphology: /* dilate, then erode */ case BottomHatMorphology: /* close and image difference */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; if ( stage_loop == 2 ) primitive = ErodeMorphology; break; case CloseIntensityMorphology: this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateIntensityMorphology; if ( stage_loop == 2 ) primitive = ErodeIntensityMorphology; break; case SmoothMorphology: /* open, close */ switch ( stage_loop ) { case 1: /* start an open method, which starts with Erode */ 
primitive = ErodeMorphology; break; case 2: /* now Dilate the Erode */ primitive = DilateMorphology; break; case 3: /* Reflect kernel a close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; break; case 4: /* Finish the Close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ErodeMorphology; break; } break; case EdgeMorphology: /* dilate and erode difference */ primitive = DilateMorphology; if ( stage_loop == 2 ) { save_image = curr_image; /* save the image difference */ curr_image = (Image *) image; primitive = ErodeMorphology; } break; case CorrelateMorphology: /* A Correlation is a Convolution with a reflected kernel. ** However a Convolution is a weighted sum using a reflected ** kernel. It may seem stange to convert a Correlation into a ** Convolution as the Correlation is the simplier method, but ** Convolution is much more commonly used, and it makes sense to ** implement it directly so as to avoid the need to duplicate the ** kernel when it is not required (which is typically the ** default). 
*/ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ConvolveMorphology; break; default: break; } assert( this_kernel != (KernelInfo *) NULL ); /* Extra information for debugging compound operations */ if ( verbose != MagickFalse ) { if ( stage_limit > 1 ) (void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions,method),(double) method_loop,(double) stage_loop); else if ( primitive != method ) (void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions, method),(double) method_loop); else v_info[0] = '\0'; } /* Loop 4: Iterate the kernel with primitive */ kernel_loop = 0; kernel_changed = 0; changed = 1; while ( kernel_loop < kernel_limit && changed > 0 ) { kernel_loop++; /* the iteration of this kernel */ /* Create a clone as the destination image, if not yet defined */ if ( work_image == (Image *) NULL ) { work_image=CloneImage(image,0,0,MagickTrue,exception); if (work_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(work_image,DirectClass) == MagickFalse) { InheritException(exception,&work_image->exception); goto error_cleanup; } /* work_image->type=image->type; ??? */ } /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */ count++; changed = MorphologyPrimitive(curr_image, work_image, primitive, channel, this_kernel, bias, exception); if ( verbose != MagickFalse ) { if ( kernel_loop > 1 ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */ (void) (void) FormatLocaleFile(stderr, "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g", v_info,CommandOptionToMnemonic(MagickMorphologyOptions, primitive),(this_kernel == rflt_kernel ) ? 
"*" : "", (double) (method_loop+kernel_loop-1),(double) kernel_number, (double) count,(double) changed); } if ( changed < 0 ) goto error_cleanup; kernel_changed += changed; method_changed += changed; /* prepare next loop */ { Image *tmp = work_image; /* swap images for iteration */ work_image = curr_image; curr_image = tmp; } if ( work_image == image ) work_image = (Image *) NULL; /* replace input 'image' */ } /* End Loop 4: Iterate the kernel with primitive */ if ( verbose != MagickFalse && kernel_changed != (size_t)changed ) (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed); if ( verbose != MagickFalse && stage_loop < stage_limit ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */ #if 0 (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image); (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image); (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image); (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image); (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image); #endif } /* End Loop 3: Primative (staging) Loop for Coumpound Methods */ /* Final Post-processing for some Compound Methods ** ** The removal of any 'Sync' channel flag in the Image Compositon ** below ensures the methematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** Turn off SVG composition 'alpha blending'. 
*/ switch( method ) { case EdgeOutMorphology: case EdgeInMorphology: case TopHatMorphology: case BottomHatMorphology: if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n%s: Difference with original image", CommandOptionToMnemonic(MagickMorphologyOptions, method) ); (void) CompositeImageChannel(curr_image, (ChannelType) (channel & ~SyncChannels), DifferenceCompositeOp, image, 0, 0); break; case EdgeMorphology: if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode", CommandOptionToMnemonic(MagickMorphologyOptions, method) ); (void) CompositeImageChannel(curr_image, (ChannelType) (channel & ~SyncChannels), DifferenceCompositeOp, save_image, 0, 0); save_image = DestroyImage(save_image); /* finished with save image */ break; default: break; } /* multi-kernel handling: re-iterate, or compose results */ if ( kernel->next == (KernelInfo *) NULL ) rslt_image = curr_image; /* just return the resulting image */ else if ( rslt_compose == NoCompositeOp ) { if ( verbose != MagickFalse ) { if ( this_kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " (re-iterate)"); else (void) FormatLocaleFile(stderr, " (done)"); } rslt_image = curr_image; /* return result, and re-iterate */ } else if ( rslt_image == (Image *) NULL) { if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, " (save for compose)"); rslt_image = curr_image; curr_image = (Image *) image; /* continue with original image */ } else { /* Add the new 'current' result to the composition ** ** The removal of any 'Sync' channel flag in the Image Compositon ** below ensures the methematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** IE: Turn off SVG composition 'alpha blending'. 
*/ if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, " (compose \"%s\")", CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) ); (void) CompositeImageChannel(rslt_image, (ChannelType) (channel & ~SyncChannels), rslt_compose, curr_image, 0, 0); curr_image = DestroyImage(curr_image); curr_image = (Image *) image; /* continue with original image */ } if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n"); /* loop to the next kernel in a multi-kernel list */ norm_kernel = norm_kernel->next; if ( rflt_kernel != (KernelInfo *) NULL ) rflt_kernel = rflt_kernel->next; kernel_number++; } /* End Loop 2: Loop over each kernel */ } /* End Loop 1: compound method interation */ goto exit_cleanup; /* Yes goto's are bad, but it makes cleanup lot more efficient */ error_cleanup: if ( curr_image == rslt_image ) curr_image = (Image *) NULL; if ( rslt_image != (Image *) NULL ) rslt_image = DestroyImage(rslt_image); exit_cleanup: if ( curr_image == rslt_image || curr_image == image ) curr_image = (Image *) NULL; if ( curr_image != (Image *) NULL ) curr_image = DestroyImage(curr_image); if ( work_image != (Image *) NULL ) work_image = DestroyImage(work_image); if ( save_image != (Image *) NULL ) save_image = DestroyImage(save_image); if ( reflected_kernel != (KernelInfo *) NULL ) reflected_kernel = DestroyKernelInfo(reflected_kernel); return(rslt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h o l o g y I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MorphologyImageChannel() applies a user supplied kernel to the image % according to the given mophology method. % % This function applies any and all user defined settings before calling % the above internal function MorphologyApply(). % % User defined settings include... 
% * Output Bias for Convolution and correlation ("-bias" or "-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
%   This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showkernel 1")
%
% The format of the MorphologyImage method is:
%
%   Image *MorphologyImage(const Image *image,MorphologyMethod method,
%     const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
%   Image *MorphologyImageChannel(const Image *image, const ChannelType
%     channel,MorphologyMethod method,const ssize_t iterations,
%     KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o method: the morphology method to be applied.
%
%   o iterations: apply the operation this many times (or no change).
%                 A value of -1 means loop until no change found.
%                 How this is applied may depend on the morphology method.
%                 Typically this is a value of 1.
%
%   o channel: the channel type.
%
%   o kernel: An array of double representing the morphology kernel.
%             Warning: kernel may be normalized for the Convolve method.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImageChannel(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
  KernelInfo
    *curr_kernel;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  curr_kernel = (KernelInfo *) kernel;
  bias=image->bias;
  if ( method == ConvolveMorphology || method == CorrelateMorphology )
    {
      const char
        *artifact;

      /* The "convolve:bias" artifact (if present) overrides the image bias */
      artifact = GetImageArtifact(image,"convolve:bias");
      if (artifact != (const char *) NULL)
        bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);

      /* User requested kernel scaling/normalization: work on a private
       * clone so the caller's (const) kernel is never modified.
       *
       * FIX: if CloneKernelInfo() fails, curr_kernel is NULL and there is
       * nothing to destroy; the previous code called DestroyKernelInfo()
       * on the NULL pointer, which is invalid.
       */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *)NULL ) {
        if ( curr_kernel == kernel )
          curr_kernel = CloneKernelInfo(kernel);
        if (curr_kernel == (KernelInfo *) NULL)
          return((Image *) NULL);   /* clone failed - nothing to clean up */
        ScaleGeometryKernelInfo(curr_kernel, artifact);
      }
    }

  /* display the (normalized) kernel via stderr */
  if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
    || IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
    || IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { const char
      *artifact;

    compose = UndefinedCompositeOp;  /* use default for method */
    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL)
      compose = (CompositeOperator) ParseCommandOption(
                             MagickComposeOptions,MagickFalse,artifact);
  }

  /* Apply the Morphology */
  morphology_image = MorphologyApply(image, channel, method, iterations,
                         curr_kernel, compose, bias, exception);

  /* Cleanup and Exit - only destroy the kernel if we cloned it above */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}

MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod
  method, const ssize_t iterations,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  Image
    *morphology_image;

  /* Convenience wrapper: apply the morphology to the default channels */
  morphology_image=MorphologyImageChannel(image,DefaultChannels,method,
    iterations,kernel,exception);
  return(morphology_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     R o t a t e K e r n e l I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateKernelInfo() rotates the kernel by the angle given.
%
%  Currently it is restricted to 90 degree angles, of either 1D kernels
%  or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
%  It will ignore useless rotations for specific 'named' built-in kernels.
%
%  The format of the RotateKernelInfo method is:
%
%      void RotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first: recurse down the multi-kernel list so
   * every kernel in the list receives the same rotation */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases: some built-in kernel shapes make rotation a no-op */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allow a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees -- 3x3 kernels only.
   * The eight values surrounding the center are shifted one position
   * around the ring (a cyclic permutation); the center value [4] is
   * untouched. */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin: move the origin offset (relative to
           * the center cell) one step around the same ring */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y ) x = 0;
            else if ( x == 0 ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0 ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }

  /* Rotations by 90 degrees */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees, in place, by
           * moving elements in 4-cycles (each outer iteration handles one
           * concentric ring of the square) */
          { register size_t
              i,j,x,y;

            register double
              *k,t;

            k=kernel->values;
            for( i=0, x=kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t
              x,y;

            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }

  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      double
        t;

      register double
        *k;

      size_t
        i, j;

      k=kernel->values;
      for ( i=0, j=kernel->width*kernel->height-1;  i<j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }

  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e G e o m e t r y K e r n e l I n f o                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleGeometryKernelInfo() takes a geometry argument string, typically
%  provided as a "-set option:convolve:scale {geometry}" user setting,
%  and modifies the kernel according to the parsed arguments of that setting.
%
%  The first argument (and any normalization flags) are passed to
%  ScaleKernelInfo() to scale/normalize the kernel.  The second argument
%  is then passed to UnityAddKernelInfo() to add a scaled unity kernel
%  into the scaled/normalized kernel.
%
%  The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const char *geometry)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%    o geometry:
%        The geometry string to parse, typically from the user provided
%        "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo(KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;

  GeometryFlags
    parse_flags;

  /* Parse the user supplied geometry string into numeric arguments */
  SetGeometryInfo(&geometry_info);
  parse_flags = (GeometryFlags) ParseGeometry(geometry, &geometry_info);

  /* A percentage flag scales both arguments down by a factor of 100 */
  if ( (parse_flags & PercentValue) != 0 ) {
    geometry_info.rho *= 0.01;
    geometry_info.sigma *= 0.01;
  }

  /* Supply defaults for any argument the user did not give */
  if ( (parse_flags & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (parse_flags & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;

  /* First argument (and normalize flags): scale/normalize the kernel */
  ScaleKernelInfo(kernel, geometry_info.rho, parse_flags);

  /* Second argument: blend in a scaled unity kernel, if one was given */
  if ( (parse_flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel is scaled
%  directly using given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.  Kernels which only contain positive values (such as a
%  'Gaussian' kernel) will be scaled so that those values sum to +1.0,
%  ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
%  For Kernels that contain some negative values, (such as 'Sharpen' kernels)
%  the kernel will be scaled by the absolute of the sum of kernel values, so
%  that it will generally fall within the +/- 1.0 range.
%
%  For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
%  will be scaled by just the sum of the positive values, so that its output
%  range will again fall into the +/- 1.0 range.
%
%  For special kernels designed for locating shapes using 'Correlate', (often
%  only containing +1 and -1 values, representing foreground/background
%  matching) a special normalization method is provided to scale the positive
%  values separately to those of the negative values, so the kernel will be
%  forced to become a zero-sum kernel better suited to such searches.
%
%  WARNING: Correct normalization of the kernel assumes that the '*_range'
%  attributes within the kernel structure have been correctly set during the
%  kernels creation.
%
%  NOTE: The values used for 'normalize_flags' have been selected specifically
%  to match the use of geometry options, so that '!' means NormalizeValue, '^'
%  means CorrelateNormalizeValue.  All other GeometryFlags values are ignored.
%
%  The format of the ScaleKernelInfo method is:
%
%      void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%               const MagickStatusType normalize_flags )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scaling_factor:
%             multiply all values (after normalization) by this factor if not
%             zero.  If the kernel is normalized regardless of any flags.
%
%    o normalize_flags:
%             GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register ssize_t i; register double pos_scale, neg_scale; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if ( ! IsNaN(kernel->values[i]) ) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'showkernel' option request. % % The format of the ShowKernel method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickExport void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if ( IsNaN(k->values[i]) ) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n a l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAdditionKernelInfo method is: % % void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel. 
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* Walk the whole multi-kernel list, blending a scaled unity kernel
   * (a single spike at the kernel origin) into each member kernel. */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next) {
    k->values[k->x+k->y*k->width] += scale;
    CalcKernelMetaData(k);  /* recalculate the meta-data */
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simply
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    i;

  KernelInfo
    *k;

  /* Replace every special 'nan' entry with zero, in each kernel of the
   * multi-kernel list. */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    for (i=0; i < (k->width*k->height); i++)
      if ( IsNaN(k->values[i]) )
        k->values[i] = 0.0;
}
cycle_share.c
// SPDX-License-Identifier: BSD-2-Clause /* Copyright 1998-2018,2021 Bernard Parent Copyright 2020 Minindu Weerakoon Copyright 2001 Giovanni Fusina Copyright 2002 Thomas E. Schwartzentruber Copyright 2021 Prasanna Thoguluva Rajendran Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/


#include <cycle/share/cycle_share.h>
#include <src/data.h>
#include <src/common.h>
#include <src/bdry.h>
#include <src/init.h>
#include <src/post.h>
#include <cycle/ts/_ts.h>
#include <cycle/tsemf/_tsemf.h>
#include <cycle/_cycle.h>
#include <cycle/res/_res.h>
#include <cycle/resconv/_resconv.h>
#include <cycle/restime/_restime.h>
#include <model/fluid/_fluid.h>
#include <model/emfield/_emfield.h>
#include <model/metrics/_metrics.h>
#include <model/fluid/_fluid.h>


/* With OpenMP the runtime schedules the work itself, so the number of
   segments/zones dispatched in one batch is effectively unbounded; with
   POSIX threads (one pthread per segment) batches are capped at 256 to
   keep thread-creation cost bounded. */
#ifdef OPENMPTHREADS
  #define maxloopthread LONG_MAX
  #define maxzonethread LONG_MAX
#else
  #define maxloopthread 256
  #define maxzonethread 256
#endif

/* NOTE(review): not referenced within this part of the file -- presumably a
   cap on the local pseudo-time-step spread; confirm usage elsewhere */
#define MAXRATIO_DTAUMAX_DTAUMIN 100.0


/* a 1D run of nodes along dimension theta, spanning node indices ls..le */
typedef struct {
  np_t *np;
  gl_t *gl;
  long theta,ls,le;
} segment_t;

/* a segment together with the function to apply to it: the unit of work
   handed to one thread by execute_function_on_all_segments() */
typedef struct {
  np_t *np;
  gl_t *gl;
  long theta,ls,le;
  void (*funct)(np_t *, gl_t *, long, long, long);
} segmentarg_t;

/* a zone together with the function to apply to it (zone-level threading) */
typedef struct {
  np_t *np;
  gl_t *gl;
  zone_t zone;
  void (*funct)(np_t *, gl_t *, zone_t);
} threadzone_t;


/* pthread trampoline: unpack a segmentarg_t and call its function on its
   segment; always returns NULL */
void *segmentfunct(void *segmentarg){
  (((segmentarg_t *) segmentarg)->funct)(
    ((segmentarg_t *) segmentarg)->np,
    ((segmentarg_t *) segmentarg)->gl,
    ((segmentarg_t *) segmentarg)->theta,
    ((segmentarg_t *) segmentarg)->ls,
    ((segmentarg_t *) segmentarg)->le);
  return(NULL);
}


/* Find the MUSCL interpolation variables at node np.  When trapezoidal
   time-stepping storage is compiled in, the previous-time-level MUSCL
   variables are appended after the first nf entries. */
void find_musclvarscycle(np_t np, gl_t *gl, musclvarscycle_t musclvars){
  find_musclvars(np,gl,musclvars);
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL_MUSCLVARS
  long flux;
//  for (flux=0; flux<nf; flux++) musclvars[nf+flux]=musclvars[flux];
  for (flux=0; flux<nf; flux++) musclvars[nf+flux]=np.bs->trapezoidalm1[flux];
#endif
}


/* Run the per-segment function on segmentarg[0..numsegment-1], either
   serially (no threading compiled in, or light work while short threads
   are disabled) or in parallel with pthreads/OpenMP. */
static void execute_function_on_all_segments(segmentarg_t *segmentarg, long numsegment, int SEGMENTWORK){
  if (
#if !defined(POSIXTHREADS) && !defined(OPENMPTHREADS)
      TRUE
#else
      /* spawning threads for light work can cost more than it saves */
      (SEGMENTWORK==SEGMENTWORK_LIGHT && segmentarg[0].gl->NOSHORTTHREADS)
#endif
     ){
    /* serial path */
    long cnt;
    for (cnt=0; cnt<numsegment; cnt++){
      segmentarg[cnt].funct(segmentarg[cnt].np,segmentarg[cnt].gl,segmentarg[cnt].theta,segmentarg[cnt].ls,segmentarg[cnt].le);
    }
  } else {
#ifdef POSIXTHREADS
    long cnt;
    void *retval;
    pthread_t *pthread;
    /* one thread per segment; NOTE(review): the +3 slack looks unnecessary
       since only numsegment entries are ever used -- confirm */
    pthread=(pthread_t *)malloc((numsegment+3)*sizeof(pthread_t));
    for (cnt=0; cnt<numsegment; cnt++){
      if (pthread_create(&((pthread)[cnt]), NULL, segmentfunct, (void *)(&(segmentarg[cnt]))))
        fatal_error("Cannot create thread.");
    }
    /* block until every segment thread has completed */
    for (cnt=0; cnt<numsegment; cnt++){
      if (pthread_join(pthread[cnt],&retval)) fatal_error("Cannot join thread %ld.",cnt);
    }
    free(pthread);
#endif
#ifdef OPENMPTHREADS
    long cnt;
#pragma omp parallel for private(cnt) schedule(dynamic)
    for (cnt=0; cnt<numsegment; cnt++){
      segmentarg[cnt].funct(segmentarg[cnt].np,segmentarg[cnt].gl,segmentarg[cnt].theta,segmentarg[cnt].ls,segmentarg[cnt].le);
    }
#endif
  }
}


/* Scan the 1D line of nodes ls..le along dimension theta and carve it into
   maximal runs of consecutive nodes accepted by is_node_valid_local().
   For each run, either just count it (COUNTFLAG) or append a filled-in
   segmentarg_t entry and bump *cntsegment.  The stored segment bounds
   exclude the bracketing non-valid sentinel nodes. */
static void create_segments(np_t *np, gl_t *gl, long theta, long ls, long le,
                     void funct(np_t *, gl_t *, long, long, long),
                     segmentarg_t *segmentarg, long *cntsegment, bool COUNTFLAG,
                     int TYPELEVEL, bool is_node_valid_local(np_t, int)){
  long l,lm1,ls_local,le_local;
  bool INSIDE;   /* TRUE while inside a run of valid nodes */
  l=ls;
  ls_local=ls; /* only needed to avoid compiler warning */
  INSIDE=FALSE;
  do {
    lm1=l;
    l=_l_plus_one(l,gl,theta);
    /* entering a run: remember the node just before its first valid node */
    if ((!INSIDE) && (is_node_valid_local(np[l],TYPELEVEL))) {
      ls_local=lm1;
      INSIDE=TRUE;
    }
    /* leaving the run (invalid node, or end of line): close the segment */
    if ((INSIDE) && ((!is_node_valid_local(np[l],TYPELEVEL)) || (l==le))){
      le_local=l;
      if (!COUNTFLAG) {
        segmentarg[*cntsegment].np=np;
        segmentarg[*cntsegment].gl=gl;
        segmentarg[*cntsegment].theta=theta;
        /* shrink bounds back onto the valid nodes themselves */
        segmentarg[*cntsegment].ls=_l_plus_one(ls_local,gl,theta);
        segmentarg[*cntsegment].le=_l_minus_one(le_local,gl,theta);
        segmentarg[*cntsegment].funct=funct;
      }
      (*cntsegment)++;
      INSIDE=FALSE;
    }
  } while (l!=le);
  if (INSIDE) fatal_error("Problem setting up segments.");
}


/* Apply funct to every valid 1D segment of zone, sweeping along the
   dimension(s) selected by sweeptype, skipping nodes not on the GRIDLEVEL
   coarsened grid, and dispatching segments in batches of at most
   maxloopthread via execute_function_on_all_segments(). */
void sweep_with_1D_segments(np_t *np, gl_t *gl, zone_t zone,
                   void funct(np_t *, gl_t *, long, long, long),
                   int sweeptype, int TYPELEVEL, bool is_node_valid_local(np_t, int),
                   int SEGMENTWORK, int GRIDLEVEL){
  long j,k,cntsegment,numthread;
  ifn1D( long i; )
  segmentarg_t *segmentarg;
  int cnt;
  bool COUNTFLAG;
  numthread=0;
  assert(is_zone_in_zone(zone,gl->domain_all));
  segmentarg=(segmentarg_t *)malloc(sizeof(segmentarg_t));
  /* do this loop twice: the first time just to count.. */
  for (cnt=0; cnt<2; cnt++){
    if (cnt==0) COUNTFLAG=TRUE; else COUNTFLAG=FALSE;
    /* ..so that on the second (executing) pass the argument array can be
       grown once to the largest batch size seen (numthread) */
    if (!COUNTFLAG) segmentarg=(segmentarg_t *)realloc(segmentarg,numthread*sizeof(segmentarg_t));
    /* the first dimension loop */
    if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_I) {
      cntsegment=0;
      for_2DL(j,zone.js,zone.je){
        if (mod(j-gl->domain_all.js,GRIDLEVEL)==0){
          for_3DL(k,zone.ks,zone.ke){
            if (mod(k-gl->domain_all.ks,GRIDLEVEL)==0){
              /* scan one i-line (padded by one node on each side) */
              create_segments(np,gl,0,_ai(gl,zone.is-1,j,k),_ai(gl,zone.ie+1,j,k), funct,
                            segmentarg,&cntsegment, (bool)COUNTFLAG, TYPELEVEL,is_node_valid_local);
              /* flush a full batch so per-batch thread count stays bounded */
              if (cntsegment>=maxloopthread) {
                numthread=max(numthread,cntsegment);
                if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
                cntsegment=0;
              }
            }
          }
        }
      }
      /* flush whatever remains in the last, partial batch */
      if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
      numthread=max(numthread,cntsegment);
    }
    /* the second dimension loop */
#ifdef _2DL
    if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_J) {
      cntsegment=0;
      for_1DL(i,zone.is,zone.ie){
        if (mod(i-gl->domain_all.is,GRIDLEVEL)==0){
          for_3DL(k,zone.ks,zone.ke){
            if (mod(k-gl->domain_all.ks,GRIDLEVEL)==0){
              /* scan one j-line (theta=1) */
              create_segments(np,gl,1,_ai(gl,i,zone.js-1,k),_ai(gl,i,zone.je+1,k), funct,
                            segmentarg,&cntsegment,(bool)COUNTFLAG, TYPELEVEL,is_node_valid_local);
              if (cntsegment>=maxloopthread) {
                numthread=max(numthread,cntsegment);
                if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
                cntsegment=0;
              }
            }
          }
        }
      }
      if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
      numthread=max(numthread,cntsegment);
    }
#endif
    /* the third dimension loop */
#ifdef _3DL
    if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_K) {
      cntsegment=0;
      for_1DL(i,zone.is,zone.ie){
        if (mod(i-gl->domain_all.is,GRIDLEVEL)==0){
          for_2DL(j,zone.js,zone.je){
            if (mod(j-gl->domain_all.js,GRIDLEVEL)==0){
create_segments(np,gl,2,_ai(gl,i,j,zone.ks-1),_ai(gl,i,j,zone.ke+1), funct, segmentarg, &cntsegment,(bool)COUNTFLAG, TYPELEVEL,is_node_valid_local); if (cntsegment>=maxloopthread) { numthread=max(numthread,cntsegment); if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); cntsegment=0; } } } } } if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); numthread=max(numthread,cntsegment); } #endif } free(segmentarg); } /* the following first sets the offset to 0, then 1, then -1 */ static long _node_offset_from_cnt(long cnt){ long offset; offset=0; if (cnt==0) offset=0; if (cnt==1) offset=1; if (cnt==2) offset=-1; return(offset); } void update_bdry_node(np_t *np, gl_t *gl, long l){ long dim,dimsgn,l_C,l_B,l_A,l_D; bool BDRYDIRECFOUND; #ifdef _2DL long offset1,offset2,cnt1,cnt2; #endif #ifdef _3D long offset3,cnt3; #endif bool UPDATED; assert(is_node_bdry(np[l],TYPELEVEL_FLUID_WORK)); UPDATED=FALSE; BDRYDIRECFOUND=find_bdry_direc(np, gl, l, TYPELEVEL_FLUID_WORK, &dim, &dimsgn); if (is_node_link(np[l],TYPELEVEL_FLUID_WORK)) { // in case the boundary node is a link, U has already been updated: simply update the prim variables find_prim_fluid(np, l, gl); UPDATED=TRUE; } if (BDRYDIRECFOUND && !UPDATED){ l_A=l; l_B=_al(gl,l,dim,dimsgn); l_C=_al(gl,l,dim,dimsgn*2); if (is_node_inner(np[_al(gl,l,dim,dimsgn*3)],TYPELEVEL_FLUID_WORK)) l_D=_al(gl,l,dim,dimsgn*3); else l_D=l_C; assert(is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK)); assert(is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK)); update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } /* now, do the corners */ if (!UPDATED) { #ifdef _2D for (cnt1=0; cnt1<=2; cnt1++){ for (cnt2=0; cnt2<=2; cnt2++){ offset1=_node_offset_from_cnt(cnt1); offset2=_node_offset_from_cnt(cnt2); l_C=_all(gl,l,0,offset1*2,1,offset2*2); l_B=_all(gl,l,0,offset1,1,offset2); l_A=l; l_D=l_C; if ( 
is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK) && is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK) && !UPDATED){ update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } } } #endif #ifdef _3D for (cnt1=0; cnt1<=2; cnt1++){ for (cnt2=0; cnt2<=2; cnt2++){ for (cnt3=0; cnt3<=2; cnt3++){ offset1=_node_offset_from_cnt(cnt1); offset2=_node_offset_from_cnt(cnt2); offset3=_node_offset_from_cnt(cnt3); l_C=_al(gl, _al(gl, _al(gl,l,0,offset1*2), 1,offset2*2), 2,offset3*2); l_B=_al(gl, _al(gl, _al(gl,l,0,offset1), 1,offset2), 2,offset3); l_A=l; l_D=l_C; if ( is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK) && is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK) && !UPDATED){ update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } } } } #endif } if (!UPDATED) { fatal_error("Problem updating boundary node in update_bdry_node() function."); } } void update_bdry_nodes_on_segment(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_bdry(np[l],TYPELEVEL_FLUID_WORK)){ thread_lock_node_set(np,l,THREADTYPE_ZONE); update_bdry_node(np, gl, l); thread_lock_node_unset(np,l,THREADTYPE_ZONE); } } } void update_bdry_nodes(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np, gl, zone, &update_bdry_nodes_on_segment, SWEEPTYPE_I, TYPELEVEL_FLUID_WORK,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); } #ifdef DISTMPI #define numfluidvars (nf+1+max(0,hbw_resconv_fluid-1)*nmc) #define numlinkvars ((hbw_resconv_fluid-1)*nmc) #define DOUBLE_INT_MAX 100000000000000 typedef double sendvars_t[max(nfe,numfluidvars)]; typedef struct { sendvars_t vars; int proc; long l; bool SENT; } sendnode_t; void update_linked_nodes_2(np_t *np, gl_t *gl, int TYPELEVEL){ int rankrecv,numproc,ranksend,thisrank; long i,j,k; zone_t zonesend,zonerecv,zone; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, 
&numproc); /* here we need to mpi the linkmusclvars */ for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { // if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) printf("x"); if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)){ assert(np[_ai(gl,i,j,k)].numlinkmusclvars!=0); assert(np[_ai(gl,i,j,k)].linkmusclvars!=NULL); MPI_Send(&np[_ai(gl,i,j,k)].numlinkmusclvars,1,MPI_INT,rankrecv,0,MPI_COMM_WORLD); MPI_Send(np[_ai(gl,i,j,k)].linkmusclvars,numlinkvars,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } } if (rankrecv==thisrank) { if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)){ MPI_Recv(&np[_ai(gl,i,j,k)].numlinkmusclvars,1,MPI_INT,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); assert(np[_ai(gl,i,j,k)].linkmusclvars!=NULL); MPI_Recv(np[_ai(gl,i,j,k)].linkmusclvars,numlinkvars,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); } } } } } } } MPI_Barrier(MPI_COMM_WORLD); } void update_linked_nodes(np_t *np, gl_t *gl, int TYPELEVEL){ long i,j,k,l1,l2,flux,offset,l,cntlink; MPI_Status MPI_Status1; musclvarscycle_t musclvars; sendvars_t mpivars; int thisrank,numproc,rank2,rank1,thisproc; int packsize,buffersize,bbuffersize; double *buffer,*bbuffer; sendnode_t *sendnode; long numsendvars,numvars,numsend,cntsend,cnt; double *sendvars; int *recvproc; int cntproc; zone_t zone; zone=gl->domain; switch (TYPELEVEL){ case TYPELEVEL_FLUID: numvars=numfluidvars; break; case TYPELEVEL_FLUID_WORK: numvars=numfluidvars; break; #ifdef EMFIELD case TYPELEVEL_EMFIELD: numvars=nfe; break; #endif default: 
fatal_error("TYPELEVEL can not be set to %d.\n",TYPELEVEL); numvars=0; } sendnode=(sendnode_t *)malloc(sizeof(sendnode_t)); sendvars=(double *)malloc(sizeof(double)); cntsend=0; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Pack_size( 1, MPI_DOUBLE, MPI_COMM_WORLD, &packsize ); recvproc=(int *)malloc((numproc+2)*sizeof(int)); buffersize = min(INT_MAX,nmc*(zone.ie-zone.is)*(zone.je-zone.js)if3DL(*(zone.ke-zone.ks)) * (MPI_BSEND_OVERHEAD + packsize)); buffer = (double *)malloc( buffersize ); MPI_Buffer_attach( buffer, buffersize ); for_ijk(zone,is,js,ks,ie,je,ke){ np[_ai(gl,i,j,k)].numlinkmusclvars=0; } /* first send the packets */ cntsend=0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL)){ #ifdef _CYCLE_MULTIZONE fatal_error("Linked nodes can not be used with Multizone cycle yet. Need to update update_linked_nodes() function."); #endif #ifdef _CYCLE_MULTIZONE_MARCHING fatal_error("Linked nodes can not be used with MultizoneMarching cycle yet. 
Need to update update_linked_nodes() function."); #endif if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL)){ for (cntlink=0; cntlink<_num_node_link(np[_ai(gl,i,j,k)],TYPELEVEL); cntlink++){ l1=_ai_all(gl,i,j,k); l2=_node_link(np[_ai(gl,i,j,k)],cntlink,TYPELEVEL); rank1=_node_rank(gl, i, j, k); rank2=_node_rank(gl, _i_all(l2,gl,0), _i_all(l2,gl,1), _i_all(l2,gl,2)); if (rank1==thisrank) { if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){ for (flux=0; flux<nf; flux++) mpivars[flux]=np[_l_from_l_all(gl,l1)].bs->U[flux]; mpivars[nf]=(double)_nodes_between_link_and_bdry_limited(np, gl, _l_from_l_all(gl,l1), l2, TYPELEVEL, max(0,hbw_resconv_fluid-1)); for (offset=1; offset<hbw_resconv_fluid; offset++) { // find_prim_fluid(np, _al_link(np, gl, _l_from_l_all(gl,l1), offset, TYPELEVEL), gl); find_musclvarscycle(np[_al_link(np, gl, _l_from_l_all(gl,l1), l2, offset, TYPELEVEL)], gl, musclvars); for (flux=0; flux<nmc; flux++) mpivars[1+flux+nf+(offset-1)*nmc]=musclvars[flux]; } if (rank1!=rank2){ for (flux=0; flux<numvars; flux++) sendnode[cntsend].vars[flux]=mpivars[flux]; sendnode[cntsend].proc=(int)rank2; sendnode[cntsend].l=l2; sendnode[cntsend].SENT=FALSE; cntsend++; sendnode=(sendnode_t *)realloc(sendnode,(cntsend+1)*sizeof(sendnode_t)); } else { /* no need to send with MPI*/ //printf("\n --(%ld,%ld,%ld) %d",i,j,k,thisrank); l=_l_from_l_all(gl,l2); for (flux=0; flux<nf; flux++) np[l].bs->U[flux]=mpivars[flux]; assert(np[l].linkmusclvars!=NULL); assert(is_node_bdry(np[l],TYPELEVEL)); assert(is_node_link(np[l],TYPELEVEL)); np[l].numlinkmusclvars=(short)round(mpivars[nf]); for (offset=1; offset<hbw_resconv_fluid; offset++) { for (flux=0; flux<nmc; flux++) np[l].linkmusclvars[flux+(offset-1)*nmc]=mpivars[1+flux+nf+(offset-1)*nmc]; } } } #ifdef EMFIELD if (TYPELEVEL==TYPELEVEL_EMFIELD){ for (flux=0; flux<numvars; flux++) mpivars[flux]=np[_l_from_l_all(gl,l1)].bs->Uemfield[flux]; if (rank1!=rank2) { for (flux=0; flux<numvars; flux++) 
sendnode[cntsend].vars[flux]=mpivars[flux]; sendnode[cntsend].proc=(int)rank2; sendnode[cntsend].l=l2; sendnode[cntsend].SENT=FALSE; cntsend++; sendnode=(sendnode_t *)realloc(sendnode,(cntsend+1)*sizeof(sendnode_t)); } else { /* no need to send with MPI */ for (flux=0; flux<nfe; flux++) np[_l_from_l_all(gl,l2)].bs->Uemfield[flux]=mpivars[flux]; } } #endif } } } } } numsend=cntsend; /* send nodes in block one proc at a time */ do { thisproc=-1; numsendvars=0; for (cntsend=0; cntsend<numsend; cntsend++){ if (thisproc==-1 && !sendnode[cntsend].SENT) thisproc=sendnode[cntsend].proc; if (sendnode[cntsend].proc==thisproc){ assert(!sendnode[cntsend].SENT); sendvars=(double *)realloc(sendvars,(numsendvars+2*numvars)*sizeof(double)); for (flux=0; flux<numvars; flux++) sendvars[numsendvars+flux]=sendnode[cntsend].vars[flux]; numsendvars+=numvars; #ifndef NDEBUG sendvars[numsendvars]=(double)mod(sendnode[cntsend].l,DOUBLE_INT_MAX); numsendvars++; #endif sendnode[cntsend].SENT=TRUE; } } if (thisproc!=-1){ if (MPI_Bsend(&numsendvars,1,MPI_LONG,thisproc,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("Problem with MPI_Bsend in update_linked_nodes()."); if (MPI_Bsend(sendvars,numsendvars,MPI_DOUBLE,thisproc,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("Problem with MPI_Bsend in update_linked_nodes()."); } } while (thisproc!=-1); for (cnt=0; cnt<(numproc+2); cnt++){ recvproc[cnt]=-1; } for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)){ l1=_node_link(np[_ai(gl,i,j,k)],0,TYPELEVEL); rank2=_node_rank(gl, i, j, k); rank1=_node_rank(gl, _i_all(l1,gl,0), _i_all(l1,gl,1), _i_all(l1,gl,2)); if (rank1!=rank2 && rank2==thisrank){ /* rank1 is one process that we will need to get data from; store it in recvproc */ cntproc=0; while(recvproc[cntproc]!=-1 && recvproc[cntproc]!=rank1 ) { cntproc++; } assert(cntproc<numproc); recvproc[cntproc]=rank1; } } } cntproc=0; while (recvproc[cntproc]!=-1) { thisproc=recvproc[cntproc]; 
MPI_Recv(&numsendvars,1,MPI_LONG,thisproc,0,MPI_COMM_WORLD,&MPI_Status1); sendvars=(double *)realloc(sendvars,numsendvars*sizeof(double)); MPI_Recv(sendvars,numsendvars,MPI_DOUBLE,thisproc,0,MPI_COMM_WORLD,&MPI_Status1); cntsend=0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)){ l2=_ai_all(gl,i,j,k); assert(is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)); l1=_node_link(np[_ai(gl,i,j,k)],0,TYPELEVEL); rank2=_node_rank(gl, i, j, k); rank1=_node_rank(gl, _i_all(l1,gl,0), _i_all(l1,gl,1), _i_all(l1,gl,2)); if (rank1!=rank2 && rank2==thisrank){ if (thisproc==rank1){ for (flux=0; flux<numvars; flux++) mpivars[flux]=sendvars[cntsend+flux]; cntsend+=numvars; #ifndef NDEBUG assert(mod(l2,DOUBLE_INT_MAX)==(long)sendvars[cntsend]); cntsend++; #endif l=_l_from_l_all(gl,l2); assert(is_node_bdry(np[l],TYPELEVEL)); assert(is_node_link(np[l],TYPELEVEL)); if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){ for (flux=0; flux<nf; flux++) np[l].bs->U[flux]=mpivars[flux]; assert(np[l].linkmusclvars!=NULL); np[l].numlinkmusclvars=(short)round(mpivars[nf]); for (offset=1; offset<hbw_resconv_fluid; offset++) { for (flux=0; flux<nmc; flux++) np[l].linkmusclvars[flux+(offset-1)*nmc]=mpivars[1+flux+nf+(offset-1)*nmc]; } } #ifdef EMFIELD if (TYPELEVEL==TYPELEVEL_EMFIELD){ for (flux=0; flux<nfe; flux++) np[l].bs->Uemfield[flux]=mpivars[flux]; } #endif } } } } cntproc++; } MPI_Buffer_detach( &bbuffer, &bbuffersize ); free(buffer); MPI_Barrier(MPI_COMM_WORLD); free(sendnode); free(recvproc); free(sendvars); update_linked_nodes_2(np, gl, TYPELEVEL); } #else//DISTMPI void update_linked_nodes(np_t *np, gl_t *gl, int TYPELEVEL){ long i,j,k,l1,l2,flux; for_ijk(gl->domain,is,js,ks,ie,je,ke){ l1=_ai(gl,i,j,k); if (is_node_bdry(np[l1],TYPELEVEL) && is_node_link(np[l1],TYPELEVEL)){ #ifdef _CYCLE_MULTIZONE fatal_error("Linked nodes can not be used with Multizone cycle yet. 
Need to update update_linked_nodes() function."); #endif #ifdef _CYCLE_MULTIZONE_MARCHING fatal_error("Linked nodes can not be used with MultizoneMarching cycle yet. Need to update update_linked_nodes() function."); #endif assert(is_node_bdry(np[l1],TYPELEVEL)); l2=_node_link(np[l1],0,TYPELEVEL); if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){ for (flux=0; flux<nf; flux++) np[l1].bs->U[flux]=np[l2].bs->U[flux]; } #ifdef EMFIELD if (TYPELEVEL==TYPELEVEL_EMFIELD){ for (flux=0; flux<nfe; flux++) np[l1].bs->Uemfield[flux]=np[l2].bs->Uemfield[flux]; } #endif } } } #endif//DISTMPI static bool is_node_in_region(bool(*FUNCT)(gl_t *, long, long, long), gl_t *gl, long i, long j, long k){ bool tmp; tmp=FUNCT(gl,i,j,k); return(tmp); } static bool is_node_in_region_extended_by_bb(bool(*FUNCT)(gl_t *, long, long, long), gl_t *gl, long i, long j, long k){ bool tmp; long cnti,cntj,cntk; tmp=FALSE; for_1DL(cnti,i-hbw_bdry_fluid,i+hbw_bdry_fluid){ for_2DL(cntj,j-hbw_bdry_fluid,j+hbw_bdry_fluid){ for_3DL(cntk,k-hbw_bdry_fluid,k+hbw_bdry_fluid){ if (FUNCT(gl,cnti,cntj,cntk)) tmp=TRUE; } } } return(tmp); } void resume_nodes_specified_in_function(np_t *np, gl_t *gl, bool(*FUNCT)(gl_t *, long, long, long)){ long i,j,k; long *noderes; long *bdryres; long numnoderes,numbdryres,cnt; copy_base_to_work_node_type(np,gl,gl->domain_lim); noderes=(long *)malloc((gl->domain.ie-gl->domain.is+4)if2DL(*(gl->domain.je-gl->domain.js+4)) if3DL(*(gl->domain.ke-gl->domain.ks+4))*sizeof(long)); bdryres=(long *)malloc((gl->domain.ie-gl->domain.is+4)if2DL(*(gl->domain.je-gl->domain.js+4)) if3DL(*(gl->domain.ke-gl->domain.ks+4))*sizeof(long)); numnoderes=0; numbdryres=0; for_ijk(gl->domain,is-1,js-1,ks-1,ie+1,je+1,ke+1){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_region_extended_by_bb(FUNCT,gl,i,j,k)) { if (resume_node(&(np[_ai(gl,i,j,k)])) ) { if (is_node_in_region(FUNCT,gl,i,j,k) ){ bdryres[numbdryres]=_ai(gl,i,j,k); numbdryres++; } 
noderes[numnoderes]=_ai(gl,i,j,k); numnoderes++; } } else { suspend_node(&(np[_ai(gl,i,j,k)])); } } /* rebuild the working variables of the inner nodes of the nodes resumed*/ for (cnt=0; cnt<numnoderes; cnt++){ if (is_node_resumed(np[noderes[cnt]]) && is_node_inner(np[noderes[cnt]],TYPELEVEL_FLUID)){ find_prim_fluid(np,noderes[cnt],gl); } } /* rebuild the working variables of the boundary nodes of the nodes resumed*/ for (cnt=0; cnt<numbdryres; cnt++){ if (is_node_resumed(np[bdryres[cnt]]) && is_node_bdry(np[bdryres[cnt]],TYPELEVEL_FLUID)) { update_bdry_node(np,gl,bdryres[cnt]); } } /* suspend all nodes needed only to compute the boundary nodes. this is necessary to ensure that all non-suspended nodes are properly updated.*/ for_ijk(gl->domain,is-1,js-1,ks-1,ie+1,je+1,ke+1){ if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_region(FUNCT,gl,i,j,k))) suspend_node(&(np[_ai(gl,i,j,k)])); } free(noderes); free(bdryres); } void resume_nodes_only_in_zone_and_update_bdry_nodes(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; long *noderes; long *bdryres; long numnoderes,numbdryres,cnt; copy_base_to_work_node_type(np,gl,gl->domain_lim); noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1) if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1)) if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long)); bdryres=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1) if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1)) if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long)); numnoderes=0; numbdryres=0; for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && (i>=zone.is-hbw_bdry_fluid) && (i<=zone.ie+hbw_bdry_fluid) if2DL(&& (j>=zone.js-hbw_bdry_fluid) && (j<=zone.je+hbw_bdry_fluid)) if3DL(&& (k>=zone.ks-hbw_bdry_fluid) && (k<=zone.ke+hbw_bdry_fluid))) { if (resume_node(&(np[_ai(gl,i,j,k)])) ) { if (is_node_in_zone(i,j,k,zone)){ bdryres[numbdryres]=_ai(gl,i,j,k); numbdryres++; } noderes[numnoderes]=_ai(gl,i,j,k); 
numnoderes++; } } else { suspend_node(&(np[_ai(gl,i,j,k)])); } } /* rebuild the working variables of the inner nodes of the nodes resumed*/ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<numnoderes; cnt++){ if (is_node_resumed(np[noderes[cnt]]) && is_node_inner(np[noderes[cnt]],TYPELEVEL_FLUID)){ find_prim_fluid(np,noderes[cnt],gl); } } free(noderes); /* rebuild the working variables of the boundary nodes of the nodes resumed*/ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<numbdryres; cnt++){ if (is_node_resumed(np[bdryres[cnt]]) && is_node_bdry(np[bdryres[cnt]],TYPELEVEL_FLUID)) { find_ijk_from_l(gl, bdryres[cnt], &i, &j, &k); if (is_node_in_zone(i, j, k, gl->domain)){ update_bdry_node(np,gl,bdryres[cnt]); } else { find_prim_fluid(np,bdryres[cnt],gl); } } } free(bdryres); /* suspend all nodes needed only to compute the boundary nodes. this is necessary to ensure that all non-suspended nodes are properly updated.*/ for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_zone(i,j,k,zone))) suspend_node(&(np[_ai(gl,i,j,k)])); } } void resume_nodes_in_zone(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; long *noderes; long numnoderes,cnt; zone_t zoneint; copy_base_to_work_node_type(np,gl,gl->domain_lim); noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1) if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1)) if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long)); numnoderes=0; zoneint=_zone_intersection(gl->domain_lim,zone); for_ijk(zoneint,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)) { if (resume_node(&(np[_ai(gl,i,j,k)])) ) { noderes[numnoderes]=_ai(gl,i,j,k); numnoderes++; } } } /* rebuild the working variables of the inner nodes of the nodes resumed*/ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<numnoderes; 
cnt++){ if (is_node_resumed(np[noderes[cnt]]) && is_node_valid(np[noderes[cnt]],TYPELEVEL_FLUID)){ find_prim_fluid(np,noderes[cnt],gl); } } free(noderes); } void resume_nodes_only_in_zone(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; long *noderes; long numnoderes,cnt; copy_base_to_work_node_type(np,gl,gl->domain_lim); noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1) if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1)) if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long)); numnoderes=0; for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && (i>=zone.is) && (i<=zone.ie) if2DL(&& (j>=zone.js) && (j<=zone.je)) if3DL(&& (k>=zone.ks) && (k<=zone.ke))) { if (resume_node(&(np[_ai(gl,i,j,k)])) ) { noderes[numnoderes]=_ai(gl,i,j,k); numnoderes++; } } else { suspend_node(&(np[_ai(gl,i,j,k)])); } } /* rebuild the working variables of the inner nodes of the nodes resumed*/ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<numnoderes; cnt++){ if (is_node_resumed(np[noderes[cnt]]) && is_node_valid(np[noderes[cnt]],TYPELEVEL_FLUID)){ find_prim_fluid(np,noderes[cnt],gl); } } free(noderes); /* suspend all nodes needed only to compute the boundary nodes. 
this is necessary to ensure that all non-suspended nodes are properly updated.*/ for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_zone(i,j,k,zone))) suspend_node(&(np[_ai(gl,i,j,k)])); } } #ifdef UNSTEADY void increase_time_level(np_t *np, gl_t *gl){ long i,j,k,flux,l; gl->time+=gl->dt; gl->iter=0; add_double_to_codex(&(gl->cycle.codex),"time",gl->time); for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ l=_ai(gl,i,j,k); if ((is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID))){ for (flux=0; flux<nf; flux++){ #if _RESTIME_BW > 3 np[l].bs->Um3[flux]=np[l].bs->Um2[flux]; #endif #if _RESTIME_BW > 2 np[l].bs->Um2[flux]=np[l].bs->Um1[flux]; #endif np[l].bs->Um1[flux]=np[l].bs->U[flux]; #ifdef _RESTIME_STORAGE_TRAPEZOIDAL_RESIDUAL np[l].bs->trapezoidalm1[flux]=np[l].bs->trapezoidalm1_next[flux]; #endif } #ifdef _RESTIME_STORAGE_TRAPEZOIDAL_MUSCLVARS find_musclvars(np[l],gl,np[l].bs->trapezoidalm1); #endif } #ifdef EMFIELD if ((is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD))){ for (flux=0; flux<nfe; flux++){ np[l].bs->Uemfieldm1[flux]=np[l].bs->Uemfield[flux]; } } #endif } } #endif//UNSTEADY void runtime_actions(char *actionname, char **argum, SOAP_codex_t *codex){ char *oldfilename; oldfilename=(char *)malloc(sizeof(char)*(5+strlen((((readcontrolarg_t *)codex->action_args)->gl->output_filename)))); strcpy(oldfilename,(((readcontrolarg_t *)codex->action_args)->gl->output_filename)); if (strcmp(actionname,"WriteDataFile")==0) { if (SOAP_number_argums(*argum)==1){ SOAP_substitute_all_argums(argum, codex); SOAP_get_argum_string(codex,&(((readcontrolarg_t *)codex->action_args)->gl->output_filename),*argum,0); } if (SOAP_number_argums(*argum)>1){ SOAP_fatal_error(codex,"Action WriteDataFile() can not be called with more than 1 argument. Either it is called with one argument (a string containing the data file name) or with no argument. 
If no argument is given, the default data file name as specified on the command line will be used."); } write_data_file(*((readcontrolarg_t *)codex->action_args)->np, ((readcontrolarg_t *)codex->action_args)->gl); codex->ACTIONPROCESSED=TRUE; } strcpy((((readcontrolarg_t *)codex->action_args)->gl->output_filename),oldfilename); free(oldfilename);
/* runtime re-reads of control-file sections: each sets RESIDUAL_ALTERED so the residual is recomputed */
 if (strcmp(actionname,"Init")==0) { read_init(*argum, codex); codex->action=&runtime_actions; ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
 ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
 codex->ACTIONPROCESSED=TRUE; } if (strcmp(actionname,"Model")==0) { read_model(*argum, codex); codex->action=&runtime_actions; ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
 ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
 codex->ACTIONPROCESSED=TRUE; } if (strcmp(actionname,"Disc")==0) { read_disc(*argum, codex); codex->action=&runtime_actions; ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
 ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
 codex->ACTIONPROCESSED=TRUE; } if (strcmp(actionname,"Cycle")==0) { read_cycle(*argum, codex); codex->action=&runtime_actions; codex->ACTIONPROCESSED=TRUE; } if (strcmp(actionname,"Post")==0) { read_post(*argum, codex); codex->action=&runtime_actions; codex->ACTIONPROCESSED=TRUE; } runtime_actions_cycle_specific(actionname,argum,codex); }

/* Write a skeleton Cycle() section (fluid, optional emfield, runtime subsections) to the control file template. */
void write_cycle_template(FILE **controlfile){ wfprintf(*controlfile, "\n\n" "Cycle(\n" ); write_cycle_fluid_template(controlfile);
#ifdef EMFIELD
 write_cycle_emfield_template(controlfile);
#endif
 write_runtime_template(controlfile); wfprintf(*controlfile, ");\n" ); }

/* SOAP action handler for the interior of Cycle(): on the runtime-module action, copies the module's
   code into gl->cycle.code_runtime (for later execution each iteration) and flags it as found;
   then delegates to the fluid and emfield cycle action handlers. */
void read_cycle_actions(char *actionname, char **argum, SOAP_codex_t *codex){ gl_t *gl; gl=((readcontrolarg_t *)codex->action_args)->gl; if (strcmp(actionname,_CYCLE_ACTIONNAME)==0 && !gl->CONTROL_READ) { if (((readcontrolarg_t *)codex->action_args)->VERBOSE) wfprintf(stdout,"%s..",_CYCLE_ACTIONNAME); ((readcontrolarg_t *)codex->action_args)-> gl->cycle.code_runtime=(char *)malloc((strlen(*argum)+2)*sizeof(char)); strcpy(((readcontrolarg_t *)codex->action_args)->gl->cycle.code_runtime,*argum); ((readcontrolarg_t *)codex->action_args)->gl->cycle.RUNTIMEMODULEFOUND=TRUE; codex->ACTIONPROCESSED=TRUE; } read_cycle_fluid_actions(actionname, argum, codex); read_cycle_emfield_actions(actionname, argum, codex); }

/* Parse the Cycle() section of the control file; on first read, verifies that the fluid, emfield
   and runtime modules were all present, then calls init_cycle(). */
void read_cycle(char *argum, SOAP_codex_t *codexcontrol){ gl_t *gl; gl=((readcontrolarg_t *)codexcontrol->action_args)->gl; if (!gl->CONTROL_READ){ gl->cycle.RUNTIMEMODULEFOUND=FALSE; } codexcontrol->action=&read_cycle_actions; SOAP_process_code(argum, codexcontrol, SOAP_VARS_KEEP_ALL); if (!gl->CONTROL_READ){ if (!gl->CYCLE_FLUID_READ) fatal_error("The fluid module %s() was not found within Cycle().",_FLUID_ACTIONNAME); if (!gl->CYCLE_EMFIELD_READ) fatal_error("The emfield module %s() was not found within Cycle().",_EMFIELD_ACTIONNAME); if (!gl->cycle.RUNTIMEMODULEFOUND) fatal_error("The module %s() was not found within Cycle().",_CYCLE_ACTIONNAME); init_cycle(argum,codexcontrol); } }

/* Write a skeleton Disc() section (fluid, optional emfield, resconv, restime) to the control file template. */
void write_disc_template(FILE **controlfile){ wfprintf(*controlfile, "\n\n" "Disc(\n" ); write_disc_fluid_template(controlfile);
#ifdef EMFIELD
 write_disc_emfield_template(controlfile);
#endif
 write_disc_resconv_template(controlfile); write_disc_restime_template(controlfile); wfprintf(*controlfile, ");\n" ); }

/* SOAP action handler for the interior of Disc(): delegates to the four discretization sub-handlers. */
void read_disc_actions(char *actionname, char **argum, SOAP_codex_t *codex){
 // gl_t *gl;
 // gl=((readcontrolarg_t *)codex->action_args)->gl;
 read_disc_fluid_actions(actionname, argum, codex); read_disc_emfield_actions(actionname, argum, codex); read_disc_resconv_actions(actionname, argum, codex); read_disc_restime_actions(actionname, argum, codex); }

/* Parse the Disc() section; on first read, verifies that the fluid, emfield, resconv and restime
   discretization modules were all present. */
void read_disc(char *argum, SOAP_codex_t *codexcontrol){ gl_t *gl; gl=((readcontrolarg_t *)codexcontrol->action_args)->gl; codexcontrol->action=&read_disc_actions; gl->DISC_FLUID_READ=FALSE; gl->DISC_EMFIELD_READ=FALSE; gl->DISC_RESCONV_READ=FALSE; gl->DISC_RESTIME_READ=FALSE; SOAP_process_code(argum, codexcontrol, SOAP_VARS_KEEP_ALL); if (!gl->CONTROL_READ){ if (!gl->DISC_FLUID_READ) fatal_error("The fluid module %s() was not found within Disc().",_FLUID_ACTIONNAME); if (!gl->DISC_EMFIELD_READ) fatal_error("The emfield module %s() was not found within Disc().",_EMFIELD_ACTIONNAME); if (!gl->DISC_RESCONV_READ) fatal_error("The residual convection module %s() was not found within Disc().",_RESCONV_ACTIONNAME); if (!gl->DISC_RESTIME_READ) fatal_error("The residual time module %s() was not found within Disc().",_RESTIME_ACTIONNAME); } }

#ifdef DISTMPI
/* not used anymore */
/* Concatenate each rank's clip list string into *cliplist_str on every rank, rank by rank via broadcast. */
void MPI_Allreduce_Sum_Cliplist(char **cliplist_str){ int rank,numproc,proc,thiscliplist_len; char *cliplistmem_str,*thiscliplist_str; cliplistmem_str=(char *)malloc((strlen(*cliplist_str)+10)*sizeof(char)); strcpy(cliplistmem_str,*cliplist_str); thiscliplist_str=(char *)malloc(sizeof(char)); strcpy(*cliplist_str,""); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); for (proc=0; proc<numproc; proc++){ if (proc==rank) { thiscliplist_len=strlen(cliplistmem_str); thiscliplist_str=(char *)realloc(thiscliplist_str,sizeof(char)*(thiscliplist_len+1)); strcpy(thiscliplist_str,cliplistmem_str); } MPI_Bcast(&thiscliplist_len,1,MPI_INT,proc,MPI_COMM_WORLD); thiscliplist_str=(char *)realloc(thiscliplist_str,sizeof(char)*(thiscliplist_len+1)); MPI_Bcast(thiscliplist_str,thiscliplist_len+1,MPI_CHAR,proc,MPI_COMM_WORLD); *cliplist_str=(char *)realloc(*cliplist_str,sizeof(char)*(strlen(*cliplist_str)+thiscliplist_len+1)); strcat(*cliplist_str,thiscliplist_str); } free(cliplistmem_str); free(thiscliplist_str); }

/* Gather the clipped-variable names/counts from every rank onto all ranks (name lengths, names and
   counts broadcast one rank at a time) and merge them via add_to_clipped_variables_all(). */
void find_clipped_variables_all(gl_t *gl){ int rank,numproc,proc,cnt; int thisclipnamenum,thisclipname_len; char *thisclipname; long thisclipnum;
 /* continuation of find_clipped_variables_all(): broadcast each rank's clip entries to all ranks */
 reset_clipped_variables_all(gl); thisclipname=(char *)malloc(sizeof(char)); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); for (proc=0; proc<numproc; proc++){ if (proc==rank) { thisclipnamenum=gl->model.clipnamenum; } MPI_Bcast(&thisclipnamenum,1,MPI_INT,proc,MPI_COMM_WORLD); for (cnt=0; cnt<thisclipnamenum; cnt++){ if (proc==rank) { thisclipname_len=strlen(gl->model.clipname[cnt]); } MPI_Bcast(&thisclipname_len,1,MPI_INT,proc,MPI_COMM_WORLD); thisclipname=(char *)realloc(thisclipname,sizeof(char)*(thisclipname_len+1)); if (proc==rank) { strcpy(thisclipname,gl->model.clipname[cnt]); thisclipnum=gl->model.clipnum[cnt]; } MPI_Bcast(thisclipname,thisclipname_len+1,MPI_CHAR,proc,MPI_COMM_WORLD); MPI_Bcast(&thisclipnum,1,MPI_LONG,proc,MPI_COMM_WORLD); add_to_clipped_variables_all(gl, thisclipname, thisclipnum);
// if (rank==0) printf("\n_%s(%ld)%d_",thisclipname,thisclipnum,proc);
 } } free(thisclipname); }
#endif

/* Publish the convergence state (clip info strings, ximax and its i/j/k location, and the emfield
   equivalents) into the SOAP codex variables read by the runtime module. Under DISTMPI the global
   maximum xi and its owning rank are found with an MPI_MAXLOC reduction, then the location indices
   are broadcast from that rank. */
void update_runtime_codex_xi_from_gl(gl_t *gl, SOAP_codex_t *codex){ char *cliplist_str;
#ifdef DISTMPI
 int rank,proc; long ijk_ximax; struct { double ximax; int rank; } ximaxrank,ximaxrank_max;
#ifdef EMFIELD
 long ijk_ximax_emfield; struct { double ximax; int rank; } ximaxrank_emfield,ximaxrank_max_emfield;
#endif
#endif//DISTMPI
#ifdef DISTMPI
 MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &proc); if (rank!=0) codex->SCREENOUTPUT=FALSE;
#endif
 cliplist_str=(char *)malloc(sizeof(char));
#ifdef DISTMPI
 find_clipped_variables_all(gl); find_clipped_variables_list_all(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo",cliplist_str); find_clipped_muscl_variables_list_all(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo_muscl",cliplist_str); find_clipped_bdry_variables_list_all(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo_bdry",cliplist_str);
#else
 find_clipped_variables_list(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo",cliplist_str); find_clipped_muscl_variables_list(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo_muscl",cliplist_str); find_clipped_bdry_variables_list(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo_bdry",cliplist_str);
//MPI_Allreduce_Sum_Cliplist(&cliplist_str);
#endif
 free(cliplist_str);
#ifdef DISTMPI
 ximaxrank.ximax=gl->ximax; ximaxrank.rank=rank; MPI_Allreduce(&ximaxrank, &ximaxrank_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); add_double_to_codex(codex,"ximax",ximaxrank_max.ximax); ijk_ximax=gl->i_ximax; MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"i_ximax",ijk_ximax);
#ifdef EMFIELD
 ximaxrank_emfield.ximax=gl->ximax_emfield; ximaxrank_emfield.rank=rank; MPI_Allreduce(&ximaxrank_emfield, &ximaxrank_max_emfield, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); add_double_to_codex(codex,"ximax_emfield",ximaxrank_max_emfield.ximax); ijk_ximax_emfield=gl->i_ximax_emfield; MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"i_ximax_emfield",ijk_ximax_emfield);
#endif
#ifdef _2DL
 ijk_ximax=gl->j_ximax; MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"j_ximax",ijk_ximax);
#ifdef EMFIELD
 ijk_ximax_emfield=gl->j_ximax_emfield; MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"j_ximax_emfield",ijk_ximax_emfield);
#endif
#endif//_2DL
#ifdef _3DL
 ijk_ximax=gl->k_ximax; MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"k_ximax",ijk_ximax);
#ifdef EMFIELD
 ijk_ximax_emfield=gl->k_ximax_emfield; MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"k_ximax_emfield",ijk_ximax_emfield);
#endif
#endif//_3DL
#else//DISTMPI
 add_double_to_codex(codex,"ximax",gl->ximax); add_int_to_codex(codex,"i_ximax",gl->i_ximax);
#ifdef EMFIELD
 add_double_to_codex(codex,"ximax_emfield",gl->ximax_emfield); add_int_to_codex(codex,"i_ximax_emfield",gl->i_ximax_emfield);
#endif
#ifdef _2DL
 add_int_to_codex(codex,"j_ximax",gl->j_ximax);
#ifdef EMFIELD
 add_int_to_codex(codex,"j_ximax_emfield",gl->j_ximax_emfield);
#endif
#endif//_2DL
#ifdef _3DL
 add_int_to_codex(codex,"k_ximax",gl->k_ximax);
#ifdef EMFIELD
 add_int_to_codex(codex,"k_ximax_emfield",gl->k_ximax_emfield);
#endif
#endif//_3DL
#endif//DISTMPI
}

/* Publish the remaining solver state (iter, xiverge, output filename, time, CFL, dt, window bounds,
   effective iteration counters) into the SOAP codex. Under DISTMPI the effiter counters are summed
   across ranks with MPI_Allreduce before being published. */
void update_runtime_codex_vars_except_xi_from_gl(gl_t *gl, SOAP_codex_t *codex){
#ifdef DISTMPI
 double effiter_U_sum,effiter_R_sum; int rank,proc;
#ifdef EMFIELD
 double effiter_U_sum_emfield,effiter_R_sum_emfield;
#endif
#endif//DISTMPI
#ifdef DISTMPI
 MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &proc); if (rank!=0) codex->SCREENOUTPUT=FALSE;
#endif
 add_int_to_codex(codex,"iter", gl->iter); add_double_to_codex(codex,"xiverge",gl->cycle.fluid.xiverge); add_string_to_codex(codex,"outputfilename", gl->output_filename);
#ifdef EMFIELD
 add_double_to_codex(codex,"xiverge_emfield",gl->cycle.emfield.xiverge);
#endif
#if defined(UNSTEADY)
 add_double_to_codex(codex,"time",gl->time);
#endif
 add_double_to_codex(codex,"CFL",gl->CFL);
#ifdef UNSTEADY
 add_double_to_codex(codex,"dt",gl->dt);
#endif
#ifdef _CYCLE_MULTIZONE_MARCHING
 add_double_to_codex(codex,"window.is",gl->window.is); add_double_to_codex(codex,"window.ie",gl->window.ie); add_int_to_codex(&(gl->cycle.codex), "numzones_updated", 0); add_int_to_codex(&(gl->cycle.codex), "numzones_total", 0);
#endif
#ifdef _CYCLE_MULTIZONE
 add_int_to_codex(&(gl->cycle.codex), "numzones_updated", 0); add_int_to_codex(&(gl->cycle.codex), "numzones_total", 0);
#endif
#ifdef DISTMPI
 MPI_Allreduce(&gl->effiter_U, &effiter_U_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); add_double_to_codex(codex,"effiter_U",effiter_U_sum); MPI_Allreduce(&gl->effiter_R, &effiter_R_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); add_double_to_codex(codex,"effiter_R",effiter_R_sum);
#ifdef EMFIELD
 /* continuation of update_runtime_codex_vars_except_xi_from_gl(): emfield effiter sums under DISTMPI */
 MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_sum_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); add_double_to_codex(codex,"effiter_U_emfield",effiter_U_sum_emfield); MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_sum_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); add_double_to_codex(codex,"effiter_R_emfield",effiter_R_sum_emfield);
#endif
#else//DISTMPI
 add_double_to_codex(codex,"effiter_U",gl->effiter_U); add_double_to_codex(codex,"effiter_R",gl->effiter_R);
#ifdef EMFIELD
 add_double_to_codex(codex,"Lc",gl->Lc);
// add_double_to_codex(codex,"relaxEMF",gl->relaxEMF);
 add_double_to_codex(codex,"effiter_U_emfield",gl->effiter_U_emfield); add_double_to_codex(codex,"effiter_R_emfield",gl->effiter_R_emfield);
#endif
#endif//DISTMPI
}

/* Publish the TSEMF_* and PRECON_* enumeration constants as SOAP variables so control-file code can
   refer to them by name. */
void add_constants_to_codex(gl_t *gl, SOAP_codex_t *codex){ char str[100]; sprintf(str, "%d", TSEMF_DEFAULT); SOAP_add_to_vars(codex,"TSEMF_DEFAULT",str); sprintf(str, "%d", TSEMF_ADI); SOAP_add_to_vars(codex,"TSEMF_ADI",str); sprintf(str, "%d", TSEMF_DDADI); SOAP_add_to_vars(codex,"TSEMF_DDADI",str); sprintf(str, "%d", TSEMF_IMAF); SOAP_add_to_vars(codex,"TSEMF_IMAF",str); sprintf(str, "%d", TSEMF_ADIIMAF); SOAP_add_to_vars(codex,"TSEMF_ADIIMAF",str); sprintf(str, "%d", TSEMF_NEWTON); SOAP_add_to_vars(codex,"TSEMF_NEWTON",str); sprintf(str, "%d", TSEMF_ADIi); SOAP_add_to_vars(codex,"TSEMF_ADIi",str); sprintf(str, "%d", TSEMF_ADIk); SOAP_add_to_vars(codex,"TSEMF_ADIk",str); sprintf(str, "%d", TSEMF_IMAFk); SOAP_add_to_vars(codex,"TSEMF_IMAFk",str); sprintf(str, "%d", TSEMF_IMAFi); SOAP_add_to_vars(codex,"TSEMF_IMAFi",str); sprintf(str, "%d", TSEMF_SOR); SOAP_add_to_vars(codex,"TSEMF_SOR",str); sprintf(str, "%d", TSEMF_SOR2); SOAP_add_to_vars(codex,"TSEMF_SOR2",str); sprintf(str, "%d", PRECON_CONSTANTTIMESTEP); SOAP_add_to_vars(codex,"PRECON_CONSTANTTIMESTEP",str); sprintf(str, "%d", PRECON_LOCALTIMESTEP); SOAP_add_to_vars(codex,"PRECON_LOCALTIMESTEP",str); sprintf(str, "%d", PRECON_LOCALTIMESTEP2); SOAP_add_to_vars(codex,"PRECON_LOCALTIMESTEP2",str); sprintf(str, "%d", PRECON_LOCALEIGENVALUE); SOAP_add_to_vars(codex,"PRECON_LOCALEIGENVALUE",str); sprintf(str, "%d", PRECON_LOCALEIGENVALUE2); SOAP_add_to_vars(codex,"PRECON_LOCALEIGENVALUE2",str); }

/* Execute the runtime-module code (gl->cycle.code_runtime) each iteration: snapshots the codex
   variables, publishes constants and convergence state, runs the code through SOAP, then reads back
   CFL (and dt, ximax) from the codex. Variable changes made inside the runtime module are discarded
   afterwards (if RESETRUNTIMEVARS) except for CFL/dt/ximax, which are probed explicitly. */
void process_code_runtime(np_t *np, gl_t *gl, char *code_runtime, SOAP_codex_t *codex){ char *code; SOAP_vars_t *varsmem; readcontrolarg_t Runtimearg; varsmem=(SOAP_vars_t *)malloc(sizeof(SOAP_vars_t)); SOAP_copy_all_vars(codex->vars, &varsmem); Runtimearg.np=&np; Runtimearg.gl=gl; Runtimearg.input=(input_t *)malloc(sizeof(input_t)); Runtimearg.input->READDATAFILE=FALSE; Runtimearg.TYPELEVEL=TYPELEVEL_FLUID; Runtimearg.module_level=0; Runtimearg.POSTMODULE=FALSE; Runtimearg.CYCLEMODULE=FALSE; Runtimearg.RESETITERCOUNT=FALSE; Runtimearg.VERBOSE=FALSE; Runtimearg.gl_post=*gl; Runtimearg.domain_post=gl->domain; Runtimearg.np_post=np; if (!gl->cycle.RUNTIMEMODULEFOUND) fatal_error("The %s() module was not found within Cycle().",_CYCLE_ACTIONNAME); code=(char *)malloc((strlen(code_runtime)+2)*sizeof(char)); strcpy(code,code_runtime); codex->ACTION=TRUE; codex->action=&runtime_actions; codex->action_args=(void *)&Runtimearg; ((readcontrolarg_t *)codex->action_args)->np=&np; ((readcontrolarg_t *)codex->action_args)->gl=gl;
/* if (codex->action_being_processed==NULL){ codex->action_being_processed=(char *)malloc((strlen(_CYCLE_ACTIONNAME)+2)*sizeof(char)); strcpy(codex->action_being_processed,_CYCLE_ACTIONNAME); }*/
 codex->VERBOSE=FALSE; codex->SCREENOUTPUT=TRUE; add_constants_to_codex(gl, codex); update_runtime_codex_xi_from_gl(gl, codex); update_runtime_codex_vars_except_xi_from_gl(gl,codex); SOAP_process_code(code, codex, SOAP_VARS_KEEP_ALL); gl->CFL=SOAP_var_value(codex,"CFL");
#ifdef UNSTEADY
 gl->dt=SOAP_var_value(codex,"dt");
#endif
 gl->ximax=SOAP_var_value(codex,"ximax"); assert(gl->CFL>=0.0e0);
/* here, make sure that all changes to vars within runtime module are erased, because those will not be written to datafile -> CFL and ximax and dt are exception to this, and this is why they are probed through SOAP_var_value above */
 if (gl->RESETRUNTIMEVARS){ SOAP_free_all_vars(codex->vars); SOAP_copy_all_vars(varsmem,&(codex->vars)); } free(Runtimearg.input); SOAP_free_all_vars(varsmem); free(varsmem); free(code); reset_clipped_variables(gl); }

/* Scan the zone for the maximum residual measure xi over inner fluid nodes, storing it in gl->ximax
   and (if IJK_UPDATE==IJK_UPDATE_YES) its i/j/k location; aborts on NaN or absurd xi. */
void find_ximax(np_t *np, gl_t *gl, zone_t zone, int IJK_UPDATE){ long i,j,k; double xi; gl->ximax=0.0e0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) { assert(is_node_resumed(np[_ai(gl,i,j,k)])); xi=np[_ai(gl,i,j,k)].wk->xi; if (xi<-1.0e99 || isnan(xi)) { fatal_error("problem with xi (xi=%E) at i=%ld, j=%ld, k=%ld.",xi,i,j,k); } if (xi>=gl->ximax) { gl->ximax=xi; if (IJK_UPDATE==IJK_UPDATE_YES) { gl->i_ximax=i; gl->j_ximax=j; gl->k_ximax=k; } } } } }

/* static void PrintZones(zone_t *zones, long numzone){ long cnt; for (cnt=0; cnt<numzone; cnt++){ printf("%ld is=%ld js=%ld ie=%ld je=%ld\n",cnt,zones[cnt].is,zones[cnt].js, zones[cnt].ie,zones[cnt].je); } printf("\n"); } */

/* Shrink pairwise-overlapping zones along i (then j, then k in 2D/3D builds) so the zone list
   becomes non-overlapping: the shared extent is split at the midpoint between the two zones. */
static void rearrange_overlapping_zones(zone_t *zones, long numzone){ long cnt1,cnt2; /* PrintZone(zones,numzones); */ for (cnt1=0; cnt1<numzone; cnt1++){ for (cnt2=0; cnt2<numzone; cnt2++){ if (cnt2!=cnt1){ /* do overlap along i : make ie of zones[cnt1] smaller and is of zones[cnt2] bigger */ if (if3DL( zones[cnt1].ks==zones[cnt2].ks && ) if2DL( zones[cnt1].js==zones[cnt2].js && ) if3DL( zones[cnt1].ke==zones[cnt2].ke && ) if2DL( zones[cnt1].je==zones[cnt2].je && ) zones[cnt1].ie< zones[cnt2].ie && zones[cnt1].ie>=zones[cnt2].is) { zones[cnt1].ie=(zones[cnt1].ie+zones[cnt2].is)/2; zones[cnt2].is=zones[cnt1].ie+1; if ( zones[cnt1].is>zones[cnt1].ie || zones[cnt2].is>zones[cnt2].ie ) fatal_error("Problem modifying zones along i."); } } } }
#ifdef _2DL
 for (cnt1=0; cnt1<numzone; cnt1++){ for (cnt2=0; cnt2<numzone; cnt2++){ if (cnt2!=cnt1){ /* do overlap along j : make je of zones[cnt1] smaller and js of zones[cnt2] bigger*/ if
 (if3DL( zones[cnt1].ks==zones[cnt2].ks && ) zones[cnt1].is==zones[cnt2].is && if3DL( zones[cnt1].ke==zones[cnt2].ke && ) zones[cnt1].ie==zones[cnt2].ie && zones[cnt1].je< zones[cnt2].je && zones[cnt1].je>=zones[cnt2].js) { zones[cnt1].je=(zones[cnt1].je+zones[cnt2].js)/2; zones[cnt2].js=zones[cnt1].je+1; if ( zones[cnt1].js>zones[cnt1].je || zones[cnt2].js>zones[cnt2].je ) fatal_error("Problem modifying zones along j."); } } } }
#endif
#ifdef _3DL
 for (cnt1=0; cnt1<numzone; cnt1++){ for (cnt2=0; cnt2<numzone; cnt2++){ if (cnt2!=cnt1){ /* do overlap along k : make je of zones[cnt1] smaller and js of zones[cnt2] bigger*/ if (zones[cnt1].is==zones[cnt2].is && zones[cnt1].js==zones[cnt2].js && zones[cnt1].ie==zones[cnt2].ie && zones[cnt1].je==zones[cnt2].je && zones[cnt1].ke< zones[cnt2].ke && zones[cnt1].ke>=zones[cnt2].ks) { zones[cnt1].ke=(zones[cnt1].ke+zones[cnt2].ks)/2; zones[cnt2].ks=zones[cnt1].ke+1; if ( zones[cnt1].ks>zones[cnt1].ke || zones[cnt2].ks>zones[cnt2].ke ) fatal_error("Problem modifying zones along k."); } } } }
#endif
/* PrintZone(zone,numzone); */
}

/* setup multizone situated inside zone */
/* Partition `zone` into subzones of length `zonelength`, keep those whose max xi exceeds xiverge
   (or all of them if UPDATE_ALL_ZONES), and derive the matching bdry and res zones by widening each
   kept zone by the fluid boundary/residual halo widths, clamped to `lim`. The res zones are finally
   made non-overlapping via rearrange_overlapping_zones(). */
void setup_multizone(np_t *np, gl_t *gl, zone_t zone, zone_t lim, double xiverge, long zonelength, bool UPDATE_ALL_ZONES, multizone_t *multizone){ long cnt; long numsubzones; zone_t *subzones; double ximax; long i,j,k; /* find the zones for the ts process */ subzones=(zone_t *)malloc(sizeof(zone_t)); find_subzones_in_zone_given_zonelength(zonelength, zone, &numsubzones, &subzones); /* find out which zones need to be updated */ multizone->numzones_ts=0; multizone->ts=(zone_t *)malloc(numsubzones*sizeof(zone_t)); for (cnt=0; cnt<numsubzones; cnt++){ ximax=0.0e0; for_ijk(subzones[cnt],is,js,ks,ie,je,ke){ if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) { ximax=max(ximax,np[_ai(gl,i,j,k)].wk->xi); } } if (ximax>xiverge || UPDATE_ALL_ZONES) { multizone->ts[multizone->numzones_ts]=subzones[cnt]; (multizone->numzones_ts)++; } } /* setup res and bdry, limited by lim_is,lim_js, etc*/ multizone->bdry=(zone_t *)malloc(multizone->numzones_ts*sizeof(zone_t)); multizone->res=(zone_t *)malloc(multizone->numzones_ts*sizeof(zone_t)); for (cnt=0; cnt<multizone->numzones_ts; cnt++){ multizone->bdry[cnt].is=max(lim.is,multizone->ts[cnt].is-hbw_bdry_fluid); multizone->bdry[cnt].ie=min(lim.ie,multizone->ts[cnt].ie+hbw_bdry_fluid);
#ifdef _2DL
 multizone->bdry[cnt].js=max(lim.js,multizone->ts[cnt].js-hbw_bdry_fluid); multizone->bdry[cnt].je=min(lim.je,multizone->ts[cnt].je+hbw_bdry_fluid);
#endif
#ifdef _3DL
 multizone->bdry[cnt].ks=max(lim.ks,multizone->ts[cnt].ks-hbw_bdry_fluid); multizone->bdry[cnt].ke=min(lim.ke,multizone->ts[cnt].ke+hbw_bdry_fluid);
#endif
 multizone->res[cnt].is=max(lim.is,multizone->ts[cnt].is-hbw_bdry_fluid-hbw_res_fluid); multizone->res[cnt].ie=min(lim.ie,multizone->ts[cnt].ie+hbw_bdry_fluid+hbw_res_fluid);
#ifdef _2DL
 multizone->res[cnt].js=max(lim.js,multizone->ts[cnt].js-hbw_bdry_fluid-hbw_res_fluid); multizone->res[cnt].je=min(lim.je,multizone->ts[cnt].je+hbw_bdry_fluid+hbw_res_fluid);
#endif
#ifdef _3DL
 multizone->res[cnt].ks=max(lim.ks,multizone->ts[cnt].ks-hbw_bdry_fluid-hbw_res_fluid); multizone->res[cnt].ke=min(lim.ke,multizone->ts[cnt].ke+hbw_bdry_fluid+hbw_res_fluid);
#endif
 } multizone->numzones_total=numsubzones; multizone->numzones_res=multizone->numzones_ts; multizone->numzones_bdry=multizone->numzones_ts; free(subzones); rearrange_overlapping_zones(multizone->res,multizone->numzones_res); }

/* pthread entry point: unpack the threadzone_t argument and invoke its funct on (np,gl,zone). */
void *thread_zone(void *threadzone){ np_t * np = ((threadzone_t *) threadzone)->np; gl_t * gl = ((threadzone_t *) threadzone)->gl; zone_t zone = ((threadzone_t *) threadzone)->zone; ((threadzone_t *) threadzone)->funct(np,gl,zone); return(NULL); }

/* Launch funct on a zone: as a pthread when ZONETHREADS is defined, otherwise synchronously. */
void create_thread_zone(np_t *np, gl_t * gl, zone_t zone, void (*funct)(np_t *, gl_t *, zone_t zone), pthread_t *pthread, threadzone_t *threadzone){ threadzone->np=np; threadzone->gl=gl; threadzone->zone=zone; threadzone->funct=funct;
#ifdef ZONETHREADS
 if (pthread_create(pthread, NULL, thread_zone, threadzone)) fatal_error("Cannot create thread.");
#else
 (*thread_zone)(threadzone);
#endif
}

/* Join numthread zone threads (no-op without ZONETHREADS, or when COUNTFLAG is set). */
void join_all_threads_zone(long numthread, pthread_t *pthread, bool COUNTFLAG){
#ifdef ZONETHREADS
 long thread; void *retval; if (!COUNTFLAG) { for (thread=0; thread<numthread; thread++){ if (pthread_join(pthread[thread],&retval)) fatal_error("Cannot join thread %ld.",thread); } }
#endif
}

/* Segment worker: add each node's dUstar into U under a per-node lock, and bump the global
   effiter_U counter under the global lock. */
static void update_U_from_dUstar_1(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ thread_lock_node_set(np,l,THREADTYPE_ZONE); add_dUstar_to_U(np,l,gl,np[l].wk->dUstar); thread_lock_node_unset(np,l,THREADTYPE_ZONE); /* - if not using SMALLTHREADS, only need to lock for the loop threads, since gl is local for the zone thread - if using SMALLTHREADS, then need to lock for both the loop and zone threads For now, lock for both the loop and zone threads */ thread_lock_global_set(gl,THREADTYPE_ALL); gl->effiter_U+=1.0/(double)(gl->nn); thread_lock_global_unset(gl,THREADTYPE_ALL); } }

/* Apply update_U_from_dUstar_1 over the zone via 1D segment sweeps along i. */
static void update_U_from_dUstar(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_1,SWEEPTYPE_I,TYPELEVEL_FLUID_WORK, &is_node_inner,SEGMENTWORK_HEAVY,GRIDLEVEL_ONE); }

/* Pick a thread count <= maxzonethread that divides numzone as evenly as possible (maximizing the
   size of the last batch); returns numzone itself when it already fits. */
long _numthread_optimized(long numzone){ long l,cnt,lmax,numthread; numthread=numzone; if (numzone>maxzonethread) { lmax=0; for (cnt=1; cnt<=maxzonethread; cnt++){ l=mod(numzone,cnt); if (l==0) l=cnt; if (l>lmax) { numthread=cnt; lmax=l; } } } return(numthread); }

#ifdef DISTMPI
/* Exchange conserved variables U between ranks over the overlap regions of their domains, using
   buffered sends (MPI_Bsend with an attached buffer sized via MPI_Pack_size), then recompute
   primitive variables on the received resumed nodes. */
void exchange_U(np_t *np, gl_t *gl){ int bl,rankrecv,numproc,ranksend,thisrank,pack_size_Ulocal,pack_size_cnt; long i,j,k,flux,iterator,cnt,prevcnt,total; long primcnt=0; long bufsize=0; long *recvcnt,*sendcnt,*processcnt; long *primnodenums=NULL; long *processnodenums; double *buf,*bufptr; zone_t zonesend,zonerecv,zone; flux_t *recvUlocal; flux_t *sendUlocal=NULL; MPI_Status MPI_Status1;
 /* continuation of exchange_U(): build per-destination send lists and per-source process lists */
 MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); recvcnt=(long *)malloc(numproc*sizeof(long)); sendcnt=(long *)malloc(numproc*sizeof(long)); processcnt=(long *)malloc(numproc*sizeof(long)); processnodenums=(long *)malloc(numproc*sizeof(long)); for (i=0; i<numproc; i++){ sendcnt[i]=0; processcnt[i]=0; } for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { for (total=0,iterator=0; iterator<numproc; iterator++) total+=sendcnt[iterator]; for (prevcnt=0,iterator=0; iterator<=rankrecv; iterator++) prevcnt+=sendcnt[iterator]; sendUlocal=(flux_t *)realloc(sendUlocal,(total+1)*sizeof(flux_t)); for (iterator=prevcnt+1;iterator<total+1;iterator++){ for (flux=0; flux<nf; flux++) *(*(sendUlocal + iterator) + flux)=*(*(sendUlocal + iterator-1) + flux); } for (flux=0; flux<nf; flux++) *(*(sendUlocal + prevcnt) + flux)=np[_ai(gl,i,j,k)].bs->U[flux]; sendcnt[rankrecv]++; } if (rankrecv==thisrank){ for (prevcnt=0,iterator=0; iterator<=ranksend; iterator++) prevcnt+=processcnt[iterator]; processnodenums=(long *)realloc(processnodenums,(prevcnt+1)*sizeof(long)); processnodenums[prevcnt]=_ai(gl,i,j,k); processcnt[ranksend]++; if (is_node_resumed(np[_ai(gl,i,j,k)])){ primnodenums=(long *)realloc(primnodenums,(primcnt+1)*sizeof(long)); primnodenums[primcnt]=_ai(gl,i,j,k); primcnt++; } } } } } } } if(numproc != 1){ for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (thisrank!=rankrecv){ MPI_Pack_size(nf*sendcnt[rankrecv],MPI_DOUBLE,MPI_COMM_WORLD,&pack_size_Ulocal); MPI_Pack_size(1,MPI_LONG,MPI_COMM_WORLD,&pack_size_cnt); bufsize+=(2*MPI_BSEND_OVERHEAD)+pack_size_Ulocal+pack_size_cnt; } } buf=(double *)malloc(bufsize); MPI_Buffer_attach(buf, bufsize); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (thisrank!=rankrecv){ for (prevcnt=0,iterator=0; iterator<rankrecv; iterator++) prevcnt+=sendcnt[iterator]; MPI_Bsend(&sendcnt[rankrecv],1,MPI_LONG,rankrecv,1,MPI_COMM_WORLD); MPI_Bsend(&sendUlocal[prevcnt],nf*sendcnt[rankrecv],MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } } free(sendUlocal); for (ranksend=0; ranksend<numproc; ranksend++){ if (thisrank!=ranksend){ MPI_Recv(&recvcnt[ranksend],1,MPI_LONG,ranksend,1,MPI_COMM_WORLD,&MPI_Status1); recvUlocal=(flux_t *)malloc(recvcnt[ranksend]*sizeof(flux_t)); MPI_Recv(recvUlocal,recvcnt[ranksend]*nf,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); for (cnt=0; cnt<recvcnt[ranksend]; cnt++){ for (prevcnt=0,iterator=0; iterator<ranksend; iterator++) prevcnt+=processcnt[iterator]; for (flux=0; flux<nf; flux++) np[processnodenums[prevcnt+cnt]].bs->U[flux]=*(*(recvUlocal + cnt) + flux); } free(recvUlocal); } }
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
 for (cnt=0; cnt<primcnt; cnt++) find_prim_fluid(np,primnodenums[cnt],gl); MPI_Buffer_detach(&bufptr,&bl); free(buf); } free(processnodenums); free(primnodenums); free(processcnt); free(recvcnt); free(sendcnt); MPI_Barrier(MPI_COMM_WORLD); }

/* Exchange U between ranks node-by-node with blocking MPI_Send/MPI_Recv over domain overlaps. */
void exchange_U_old(np_t *np, gl_t *gl){
 //same as above but without the MPI_Buffer
 int rankrecv,numproc,ranksend,thisrank; long i,j,k,flux; long cnt = 0; long *nodenums = NULL; zone_t zonesend,zonerecv,zone; flux_t Ulocal; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { for (flux=0; flux<nf; flux++) Ulocal[flux]=np[_ai(gl,i,j,k)].bs->U[flux]; MPI_Send(Ulocal,nf,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } if (rankrecv==thisrank) { MPI_Recv(Ulocal,nf,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); for (flux=0; flux<nf; flux++) np[_ai(gl,i,j,k)].bs->U[flux]=Ulocal[flux]; if (is_node_resumed(np[_ai(gl,i,j,k)])){ nodenums=(long *)realloc(nodenums,(cnt+1)*sizeof(long)); nodenums[cnt]=_ai(gl,i,j,k); cnt++; } } } } } } }
#ifdef OPENMPTHREADS
#pragma omp parallel for private(i) schedule(dynamic)
#endif
 for (i=0; i<cnt; i++) find_prim_fluid(np,nodenums[i],gl); free(nodenums); MPI_Barrier(MPI_COMM_WORLD); }
#endif

/* Compute dUstar for every ts zone (in batches of zone threads), then add dUstar into U zone by zone. */
void update_U_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){ long cnt,numzonethread,cntthread; pthread_t *pthread; threadzone_t *threadzone; /* Find dUstar for inner nodes*/ numzonethread=_numthread_optimized(multizone.numzones_ts); pthread=(pthread_t *)malloc(numzonethread*sizeof(pthread_t)); threadzone=(threadzone_t *)malloc(numzonethread*sizeof(threadzone_t)); cntthread=0; for (cnt=0; cnt<multizone.numzones_ts; cnt++) { create_thread_zone(np, gl, multizone.ts[cnt], &find_dU, &(pthread[cntthread]), &(threadzone[cntthread])); cntthread++; if (cntthread==numzonethread) { join_all_threads_zone(cntthread, pthread, FALSE); cntthread=0; } } if (cntthread>0) join_all_threads_zone(cntthread, pthread, FALSE); for (cnt=0; cnt<multizone.numzones_ts; cnt++) update_U_from_dUstar(np, gl, multizone.ts[cnt]); free(pthread); free(threadzone); }

/* Update boundary nodes over every bdry zone of the multizone. */
void update_bdry_nodes_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){ long cnt; for (cnt=0; cnt<multizone.numzones_bdry; cnt++) update_bdry_nodes(np, gl, multizone.bdry[cnt]); }

/* Recompute the residual over every res zone of the multizone, in batches of zone threads. */
void find_residual_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){ long cnt,numzonethread,cntthread; pthread_t *pthread; threadzone_t *threadzone; numzonethread=_numthread_optimized(multizone.numzones_res); pthread=(pthread_t *)malloc(numzonethread*sizeof(pthread_t));
 /* continuation of find_residual_with_multizone(): dispatch find_residual per res zone */
 threadzone=(threadzone_t *)malloc(numzonethread*sizeof(threadzone_t)); cntthread=0; for (cnt=0; cnt<multizone.numzones_res; cnt++) { create_thread_zone(np, gl, multizone.res[cnt], &find_residual, &(pthread[cntthread]), &(threadzone[cntthread])); cntthread++; if (cntthread==numzonethread) { join_all_threads_zone(cntthread, pthread, FALSE); cntthread=0; } } if (cntthread>0) { join_all_threads_zone(cntthread, pthread, FALSE); } free(pthread); free(threadzone); }

/* One multizone pass: update U, then boundary nodes, then the residual. */
void solve_multizone(np_t *np, gl_t *gl, multizone_t multizone){ update_U_with_multizone(np,gl,multizone); update_bdry_nodes_with_multizone(np,gl,multizone); find_residual_with_multizone(np,gl,multizone); }

/* Release the zone arrays owned by a multizone_t. */
void free_multizone(multizone_t *multizone){ free(multizone->res); free(multizone->bdry); free(multizone->ts); }

/* Recompute the residual and ximax (fluid and, if enabled, emfield) over the zone; under DISTMPI
   the global max xi and its owning rank are found with MPI_MAXLOC and the location indices are
   broadcast from that rank. */
void check_residual(np_t *np, gl_t *gl, zone_t zone){ resume_nodes_in_zone(np, gl, zone);
#ifdef EMFIELD
 update_prim_emfield_mem_in_zone(np, gl, zone);
#endif
 find_residual(np, gl, zone); find_ximax(np,gl,zone,IJK_UPDATE_YES);
#ifdef EMFIELD
 find_residual_emfield(np,gl,zone); find_ximax_emfield(np, gl, zone);
#endif
#ifdef DISTMPI
 int rank,proc; struct { double ximax; int rank; } ximaxrank,ximaxrank_max;
#ifdef EMFIELD
 struct { double ximax; int rank; } ximaxrank_emfield,ximaxrank_max_emfield;
#endif
 MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &proc); ximaxrank.ximax=gl->ximax; ximaxrank.rank=rank; MPI_Allreduce(&ximaxrank, &ximaxrank_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); gl->ximax=ximaxrank_max.ximax; MPI_Bcast(&(gl->i_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
 ximaxrank_emfield.ximax=gl->ximax_emfield; ximaxrank_emfield.rank=rank; MPI_Allreduce(&ximaxrank_emfield, &ximaxrank_max_emfield, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); gl->ximax_emfield=ximaxrank_max_emfield.ximax; MPI_Bcast(&(gl->i_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#ifdef _2DL
 MPI_Bcast(&(gl->j_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
 MPI_Bcast(&(gl->j_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#endif //_2DL
#ifdef _3DL
 MPI_Bcast(&(gl->k_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
 MPI_Bcast(&(gl->k_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#endif //_3DL
#endif //DISTMPI
}

/* Return the node's residual measure xi: the max over fluxes of |Res/Omega/Uref|; aborts on NaN. */
double _xi(np_t np, gl_t *gl, flux_t Res){ long flux; double xi,xitmp; assert_np(np,is_node_resumed(np)); xi=0.0; for (flux=0; flux<nf; flux++) { xitmp=fabs(Res[flux]/_Omega(np,gl)/gl->cycle.fluid.Uref[flux]); xi=max(xi,xitmp); if (isnan(xitmp)){ fatal_error("problem computing xitmp in function _xi() in cycle_share.c;\n xitmp=%E\n Res[%ld]=%E\n Omega=%E\n Uref[%ld]=%E\n",xitmp,flux,Res[flux],_Omega(np,gl),flux,gl->cycle.fluid.Uref[flux]); } } return(xi); }

/* Find Delta_Lambda for the pseudotime step at node l along dim; with PRECON_LOCALTIMESTEP2 it is
   additionally maximized over the immediate inner neighbours along dim. */
static void find_Delta_Lambda_for_dtau_local(np_t *np, gl_t *gl, long l, long dim, flux_t Delta_Lambda){ long offset,maxoffset,flux,dim2; flux_t Delta_Lambda_tmp; find_Delta_Lambda_for_dtau(np, gl, l, dim, Delta_Lambda); if (gl->PRECONDITIONER==PRECON_LOCALTIMESTEP2){ maxoffset=1; for (dim2=dim; dim2<=dim; dim2++){ for (offset=1; offset<=maxoffset; offset++){ if (is_node_inner(np[_al(gl,l,dim2,-offset)],TYPELEVEL_FLUID_WORK)){ find_Delta_Lambda_for_dtau(np, gl, _al(gl,l,dim2,-offset), dim, Delta_Lambda_tmp); for (flux=0; flux<nf; flux++) Delta_Lambda[flux]=max(Delta_Lambda[flux],Delta_Lambda_tmp[flux]); } if (is_node_inner(np[_al(gl,l,dim2,+offset)],TYPELEVEL_FLUID_WORK)){ find_Delta_Lambda_for_dtau(np, gl, _al(gl,l,dim2,+offset), dim, Delta_Lambda_tmp); for (flux=0; flux<nf; flux++) Delta_Lambda[flux]=max(Delta_Lambda[flux],Delta_Lambda_tmp[flux]); } } } } }

/* Compute the per-flux local pseudotime step dtau at node l, blending the min and max directional
   dtau via exponent gl->sigma1 and scaling by CFL; with PRECON_CONSTANTTIMESTEP every flux simply
   gets gl->dtau. */
void find_dtau(np_t *np, gl_t *gl, long l, flux_t dtau){ double dtaumin,dtaumax; long dim,flux; double dtaulocal[nf][nd];
#ifdef UNSTEADY
 sqmat_t LambdaZ;
#endif
 flux_t Delta_Lambda; assert_np(np[l],is_node_inner(np[l],TYPELEVEL_FLUID_WORK)); if (gl->PRECONDITIONER!=PRECON_CONSTANTTIMESTEP){
#ifdef UNSTEADY
 find_LambdaZ(np,gl,l,LambdaZ); set_matrix_to_identity(LambdaZ);
 //turn off effect of LambdaZ -> seems to be detrimental not beneficial
 for (dim=0; dim<nd; dim++){ find_Delta_Lambda_for_dtau_local(np, gl, l, dim, Delta_Lambda); for (flux=0; flux<nf; flux++){ assert(LambdaZ[flux][flux]>0.0); dtaulocal[flux][dim]=gl->dt/LambdaZ[flux][flux]/notzero(Delta_Lambda[flux]*gl->dt/LambdaZ[flux][flux]+1.0,1e-39); } }
#else
 for (dim=0; dim<nd; dim++){ find_Delta_Lambda_for_dtau_local(np, gl, l, dim, Delta_Lambda); for (flux=0; flux<nf; flux++){ dtaulocal[flux][dim]=1.0/notzero(Delta_Lambda[flux],1e-39); } }
#endif
 /* find optimal dtaus for each flux */ for (flux=0; flux<nf; flux++){ dtaumin=1.0e99; dtaumax=0.0e0; for (dim=0; dim<nd; dim++){ dtaumin=min(dtaulocal[flux][dim],dtaumin); dtaumax=max(dtaulocal[flux][dim],dtaumax); } dtaumax=min(dtaumin*MAXRATIO_DTAUMAX_DTAUMIN,dtaumax); dtau[flux]=gl->CFL*pow(dtaumin,1.0e0-gl->sigma1)*pow(dtaumax,gl->sigma1); } } else { for (flux=0; flux<nf; flux++){ dtau[flux]=gl->dtau; } } }

/* Collapse the per-flux dtau vector at node l into one scalar by blending its min and max with
   exponent gl->sigma2 (max first clamped to dtaumin*MAXRATIO_DTAUMAX_DTAUMIN). */
void find_constant_dtau(np_t *np, gl_t *gl, long l, double *dtau){ long flux; flux_t dtau_vector; double dtaumin,dtaumax; find_dtau(np,gl,l,dtau_vector); /* average min and max dtau */ dtaumin=1.0e99; dtaumax=-1.0e99; for (flux=0; flux<nf; flux++) dtaumin=min(dtaumin,dtau_vector[flux]); for (flux=0; flux<nf; flux++) dtaumax=max(dtaumax,dtau_vector[flux]); dtaumax=min(dtaumin*MAXRATIO_DTAUMAX_DTAUMIN,dtaumax); *dtau=pow(dtaumin,1.0-gl->sigma2)*pow(dtaumax,gl->sigma2); }

#ifdef EMFIELD
#ifdef DISTMPI
/* Exchange Uemfield between ranks node-by-node over domain overlaps (blocking send/recv). */
void exchange_U_emfield(np_t *np, gl_t *gl){ int rankrecv,numproc,ranksend,thisrank; long i,j,k,flux; zone_t zonesend,zonerecv,zone; fluxemfield_t Ulocal; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if
 (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { for (flux=0; flux<nfe; flux++) Ulocal[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux]; MPI_Send(Ulocal,nfe,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } if (rankrecv==thisrank) { MPI_Recv(Ulocal,nfe,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); for (flux=0; flux<nfe; flux++) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Ulocal[flux]; } } } } } } MPI_Barrier(MPI_COMM_WORLD); }

/* Broadcast Uemfield for every node of the whole domain from its owning rank to all ranks. */
void exchange_U_emfield_old(np_t *np, gl_t *gl){ int rank; long i,j,k,flux; fluxemfield_t Ulocal; MPI_Comm_rank(MPI_COMM_WORLD, &rank); for_ijk (gl->domain_all,is,js,ks,ie,je,ke){ if (rank==_node_rank(gl, i, j, k) && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) { for (flux=0; flux<nfe; flux++) { Ulocal[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux]; } } MPI_Bcast_Node(Ulocal,nfe,MPI_DOUBLE,_node_rank(gl,i,j,k),MPI_COMM_WORLD,i,j,k,gl); if (is_node_in_zone(i,j,k,gl->domain_lim) && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) { for (flux=0; flux<nfe; flux++) { np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Ulocal[flux]; } } } MPI_Barrier(MPI_COMM_WORLD); }
#endif

/* Segment worker: pass 1 of the emfield primitive-memory update over valid emfield nodes. */
void update_prim_emfield_mem_in_zone_1(np_t *np, gl_t *gl, long theta, long ls, long le){ long l;
 //printf("(%ld,%ld) to (%ld,%ld)\n",_i(ls,gl,0),_i(ls,gl,1),_i(le,gl,0),_i(le,gl,1));
 for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){ find_prim_emfield_mem_1(np, gl, l); } } }

/* Segment worker: pass 2 of the emfield primitive-memory update. */
void update_prim_emfield_mem_in_zone_2(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){ find_prim_emfield_mem_2(np, gl, l); } } }

/* Segment worker: pass 3 of the emfield primitive-memory update. */
void update_prim_emfield_mem_in_zone_3(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){ find_prim_emfield_mem_3(np, gl, l); } } }

#ifdef _TSEMF_STORE_COEFFICIENTS
/* Segment worker: precompute and cache per-node emfield dtau and linearization coefficients
   (coeffm1/coeffp0/coeffp1 per dimension, plus coeffp0sum) for inner emfield nodes. */
void update_prim_emfield_mem_in_zone_4(np_t *np, gl_t *gl, long theta, long ls, long le){ long l,dim,flux; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_inner(np[l],TYPELEVEL_EMFIELD)){ for (flux=0; flux<nfe; flux++){ find_dtau_emfield(np,gl,l,flux,&(np[l].bs->dtauemfield[flux])); np[l].bs->coeffp0sum[flux]=0.0; for (dim=0; dim<nd; dim++){ find_linearization_coefficients_inner_node_emfield(np, gl, l, dim, flux, &(np[l].bs->coeffm1[dim][flux]), &(np[l].bs->coeffp0[dim][flux]), &(np[l].bs->coeffp1[dim][flux])); np[l].bs->coeffp0sum[flux]+=np[l].bs->coeffp0[dim][flux]; } } } } }
#endif

/* Run the emfield primitive-memory passes 1..3 (and the coefficient pass when enabled) over the
   zone via 1D segment sweeps along i. */
void update_prim_emfield_mem_in_zone(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_1,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_2,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_3,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
#ifdef _TSEMF_STORE_COEFFICIENTS
 sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_4,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
#endif
}

/* Accumulate the convective flux difference into Resemfield along direction theta: each interface
   flux Fm1h is subtracted from the right node and added to the left node of the interface. */
void add_convection_residual_emfield(long theta, long ls, long le, np_t *np, gl_t *gl){ long l,flux; fluxemfield_t Fm1h; for (l=ls; l!=_l_plus_one(_l_plus_one(le,gl,theta),gl,theta); l=_l_plus_one(l,gl,theta)){ find_Fstar_interface_emfield(np,gl,_al(gl,l,theta,-1),_al(gl,l,theta,+0),theta,Fm1h); for (flux=0; flux<nfe; flux++){ if (l!=_l_plus_one(le,gl,theta)) np[l].bs->Resemfield[flux]-=Fm1h[flux]; if (l!=ls) np[_al(gl,l,theta,-1)].bs->Resemfield[flux]+=Fm1h[flux]; } } }

/* Subtract the emfield source terms from Resemfield; done only on the theta==0 sweep so sources
   are applied once per node. */
void add_source_residual_emfield(long theta, long ls, long le, np_t *np, gl_t *gl){ long l; long flux; fluxemfield_t S; if (theta==0) { for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ find_Sstar_emfield(np,gl,l,S); for (flux=0; flux<nfe; flux++) np[l].bs->Resemfield[flux]-=S[flux]; } } }

/* Add convection then source contributions to the emfield residual over one segment. */
void update_residual_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){ add_convection_residual_emfield(theta,ls,le,np,gl); add_source_residual_emfield(theta,ls,le,np,gl); }

/* Zero Resemfield over the segment and bump the emfield residual effective-iteration counter. */
void initialize_residual_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){ long l,flux; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ for (flux=0; flux<nfe; flux++) np[l].bs->Resemfield[flux]=0.0e0; gl->effiter_R_emfield+=1.0e0/(double)gl->nn; } }

/* Update one emfield boundary node: if it is a link its Uemf is already current; otherwise apply
   the boundary condition along the found boundary direction, falling back to corner treatment
   (diagonal inner neighbours) when no straight boundary direction yields inner nodes. */
void update_bdry_node_emfield(np_t *np, gl_t *gl, long l){ long dim,l_C,l_B,l_A; long dimsgn; bool BDRYDIRECFOUND;
#ifdef _2DL
 long dim1; long dim2;
#endif
#ifdef _3D
 long dim3;
#endif
 bool UPDATED; assert(is_node_bdry(np[l],TYPELEVEL_EMFIELD)); UPDATED=FALSE; BDRYDIRECFOUND=find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &dim, &dimsgn); if (is_node_link(np[l],TYPELEVEL_EMFIELD)) {
 // in case the boundary node is a link, Uemf has already been updated
 UPDATED=TRUE; } if (!UPDATED && BDRYDIRECFOUND){ l_A=l; l_B=_al(gl,l,dim,dimsgn); l_C=_al(gl,l,dim,dimsgn*2); assert(is_node_inner(np[l_C],TYPELEVEL_EMFIELD)); assert(is_node_inner(np[l_B],TYPELEVEL_EMFIELD)); update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD); UPDATED=TRUE; } /* now, do the corners */ if (!UPDATED) {
#ifdef _2D
 for (dim1=-1; dim1<=1; dim1++){ for (dim2=-1; dim2<=1; dim2++){ l_C=_all(gl,l,0,dim1*2,1,dim2*2); l_B=_all(gl,l,0,dim1,1,dim2); l_A=l; if ( is_node_inner(np[l_B],TYPELEVEL_EMFIELD) && is_node_inner(np[l_C],TYPELEVEL_EMFIELD) && !UPDATED){ update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD); UPDATED=TRUE; } } }
#endif
#ifdef _3D
 for (dim1=-1; dim1<=1; dim1++){ for (dim2=-1; dim2<=1;
dim2++){ for (dim3=-1; dim3<=1; dim3++){ l_C=_al(gl, _al(gl, _al(gl,l,0,dim1*2), 1,dim2*2), 2,dim3*2); l_B=_al(gl, _al(gl, _al(gl,l,0,dim1), 1,dim2), 2,dim3); l_A=l; if ( is_node_inner(np[l_B],TYPELEVEL_EMFIELD) && is_node_inner(np[l_C],TYPELEVEL_EMFIELD) && !UPDATED){ update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD); UPDATED=TRUE; } } } } #endif } } void update_bdry_nodes_on_segment_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_bdry(np[l],TYPELEVEL_EMFIELD)){ thread_lock_node_set(np,l,THREADTYPE_ZONE); update_bdry_node_emfield(np, gl, l); thread_lock_node_unset(np,l,THREADTYPE_ZONE); } } } void update_bdry_nodes_emfield(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np, gl, zone, &update_bdry_nodes_on_segment_emfield, SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); } void find_residual_emfield(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; /* now, let's find the residual and store it in bs->dUstaremfield*/ sweep_with_1D_segments(np,gl,zone,&initialize_residual_emfield, SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); sweep_with_1D_segments(np,gl,zone,&update_residual_emfield, SWEEPTYPE_IJK, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_HEAVY,GRIDLEVEL_ONE); /* let's find max residual, and put it in gl*/ for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) { np[_ai(gl,i,j,k)].bs->_xi_emfield=_xi_emfield(np[_ai(gl,i,j,k)],gl,np[_ai(gl,i,j,k)].bs->Resemfield); } } } void find_ximax_emfield(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; gl->ximax_emfield=0.0e0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD) && np[_ai(gl,i,j,k)].bs->_xi_emfield>=gl->ximax_emfield) { gl->ximax_emfield=np[_ai(gl,i,j,k)].bs->_xi_emfield; gl->i_ximax_emfield=i; gl->j_ximax_emfield=j; gl->k_ximax_emfield=k; } } } 
/* Parse the arguments of the UpdateEMField() control-file call:
   argument 0 = length scale Lc, 1 = relaxation factor relaxEMF,
   then (UNSTEADY builds) 2 = time step dt, optional 3 = tsemfmethod,
   optional 4 = number of subiterations; in steady builds the method and
   subiteration arguments shift down by one. Defaults: 4 subiterations,
   TSEMF_DEFAULT method. Invalid values abort via fatal_error().
   NOTE(review): numsubiter_tsemf is validated right after being set to the
   constant 4, so that check can never fire (and compares a long against 0.0);
   also the error strings below contain "TSMEF_*" typos -- confirm upstream
   before touching them, they are runtime output. */
void read_UpdateEMField_arguments(char **argum, SOAP_codex_t *codex, gl_t *gl){
  SOAP_substitute_all_argums(argum, codex);
  gl->Lc=SOAP_get_argum_double(codex,*argum,0);
  gl->relaxEMF=SOAP_get_argum_double(codex,*argum,1);
  gl->numsubiter_tsemf=4; /* make the default number of subiterations equal to 4 */
  gl->tsemfmethod=TSEMF_DEFAULT;
  if (gl->Lc<=0.0)
    fatal_error("The length scale Lc must be positive when calling UpdateEMField().");
  if (gl->relaxEMF<=0.0)
    fatal_error("The relaxation factor relaxEMF must be positive when calling UpdateEMField().");
  if (gl->relaxEMF>2.0)
    fatal_error("The relaxation factor relaxEMF must be less than 2 when calling UpdateEMField().");
  if (gl->numsubiter_tsemf<=0.0)
    fatal_error("The number of subiterations subiter_tsemf must be positive when calling UpdateEMField().");
#ifdef UNSTEADY
  gl->dt=SOAP_get_argum_double(codex,*argum,2);
  if (gl->dt<=0.0)
    fatal_error("The time step dt must be positive when calling UpdateEMField().");
  if (SOAP_number_argums(*argum)>3)
    gl->tsemfmethod=SOAP_get_argum_long(codex,*argum,3);
  if (SOAP_number_argums(*argum)>4){
    /* only the iterative methods accept a subiteration count */
    if (gl->tsemfmethod==TSEMF_SOR || gl->tsemfmethod==TSEMF_SOR2 || gl->tsemfmethod==TSEMF_ADIIMAF || gl->tsemfmethod==TSEMF_IMAF || gl->tsemfmethod==TSEMF_IMAFk || gl->tsemfmethod==TSEMF_IMAFi)
      gl->numsubiter_tsemf=SOAP_get_argum_long(codex,*argum,4);
    else
      fatal_error("UpdateEMField accepts the number of subiterations as a 5th argument only if TSEMF_SOR, TSEMF_SOR2, TSEMF_ADIIMAF, TSEMF_IMAF, TSMEF_IMAFk, TSMEF_IMAFi is specified.");
  }
#else
  if (SOAP_number_argums(*argum)>2)
    gl->tsemfmethod=SOAP_get_argum_long(codex,*argum,2);
  if (SOAP_number_argums(*argum)>3) {
    if (gl->tsemfmethod==TSEMF_SOR || gl->tsemfmethod==TSEMF_SOR2 || gl->tsemfmethod==TSEMF_ADIIMAF || gl->tsemfmethod==TSEMF_IMAF || gl->tsemfmethod==TSEMF_IMAFk || gl->tsemfmethod==TSEMF_IMAFi)
      gl->numsubiter_tsemf=SOAP_get_argum_long(codex,*argum,3);
    else
      fatal_error("UpdateEMField accepts the number of subiterations as a 4th argument only if TSEMF_SOR, TSEMF_SOR2, TSEMF_ADIIMAF, TSEMF_IMAF, TSEMF_IMAFk, TSMEF_IMAFi is specified.");
  }
#endif
}


/* Solve a tridiagonal system along one grid line for the EM field.
   tdma[line].val = {sub-diagonal, diagonal, super-diagonal, RHS}.
   Serial (non-DISTMPI, or EM_MPIBDRY_EXPLICIT) case: plain EXM_solve_TDMA.
   DISTMPI case: a pipelined Thomas algorithm across ranks -- the forward
   elimination receives the boundary row from the upstream rank owning ls and
   sends the last eliminated row downstream; the back substitution does the
   reverse. Messages are matched by tag _ai_all(...) of the node the row
   belongs to. Blocking MPI_Send/MPI_Recv order the pipeline. */
void solve_TDMA_emfield(np_t *np, gl_t *gl, long theta, long ls, long le, int TYPELEVEL, EXM_tdmaline_t *tdma, long numlines){
#ifdef DISTMPI
  long line,cnt,i,j,k,i_s,j_s,k_s;
  double tmp;
  MPI_Status MPI_Status1;
  if (gl->EM_MPIBDRY_EXPLICIT){
    EXM_solve_TDMA(tdma, numlines);
  } else {
    /* if ls node is inner node, need to obtain the tdma[0] from another process that owns ls */
    if (is_node_inner(np[ls],TYPELEVEL)){
      find_ijk_from_l(gl, ls, &i, &j, &k);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Recv(tdma[0].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i,j,k),MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS)
        fatal_error("MPI_Recv problem in solve_TDMA_emfield");
      assert(tdma[0].val[0]==0.0);
    }
    /* forward elimination: zero the sub-diagonal row by row */
    for (line=0; line<numlines-1; line++){
      assert(tdma[line].val[1]!=0.0);
      tmp = -(tdma[line+1].val[0] / tdma[line].val[1]);
      for (cnt = 1; cnt <= 2; cnt++)
        tdma[line+1].val[cnt - 1] += tdma[line].val[cnt] * tmp;
      tdma[line+1].val[3] += tdma[line].val[3] * tmp;
      tdma[line+1].val[0] = 0.0;
    }
    /* if le node is inner node, need to send the tdma[numlines-2] to another process that owns le */
    if (is_node_inner(np[le],TYPELEVEL)){
      find_ijk_from_l(gl, le, &i, &j, &k);
      find_ijk_from_l(gl, _l_minus_one(le,gl,theta), &i_s, &j_s, &k_s);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Send(tdma[numlines-2].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i_s,j_s,k_s),MPI_COMM_WORLD)!=MPI_SUCCESS)
        fatal_error("MPI_Send problem in solve_TDMA_emfield");
    }
    /* if le node is inner node, need to obtain the tdma[numlines-1] from another process that owns le */
    if (is_node_inner(np[le],TYPELEVEL)){
      find_ijk_from_l(gl, le, &i, &j, &k);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Recv(tdma[numlines-1].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i,j,k),MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS)
        fatal_error("MPI_Recv problem in solve_TDMA_emfield");
      assert(tdma[numlines-1].val[2]==0.0);
    }
    /* back substitution: normalize the diagonal and clear the super-diagonal */
    for (line=numlines-1; line>0; line--){
      assert(tdma[line].val[1]!=0.0);
      tdma[line].val[3] /= tdma[line].val[1];
      tdma[line].val[1] = 1.0;
      tdma[line-1].val[3] -= tdma[line].val[3] * tdma[line-1].val[2];
      tdma[line-1].val[2] = 0.0;
    }
    assert(tdma[0].val[1]!=0.0);
    tdma[0].val[3] /= tdma[0].val[1];
    tdma[0].val[1] = 1.0;
    /* if ls node is inner node, need to send the tdma[1] to another process that owns ls */
    if (is_node_inner(np[ls],TYPELEVEL)){
      find_ijk_from_l(gl, ls, &i, &j, &k);
      find_ijk_from_l(gl, _l_plus_one(ls,gl,theta), &i_s, &j_s, &k_s);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Send(tdma[1].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i_s,j_s,k_s),MPI_COMM_WORLD)!=MPI_SUCCESS)
        fatal_error("MPI_Send problem in solve_TDMA_emfield");
    }
  }
#else
  EXM_solve_TDMA(tdma, numlines);
#endif
}

#endif//EMFIELD
make-blue-noise.c
/* Blue noise generation using the void-and-cluster method as described in
 *
 *     The void-and-cluster method for dither array generation
 *     Ulichney, Robert A (1993)
 *
 *     http://cv.ulichney.com/papers/1993-void-cluster.pdf
 *
 * Note that running with openmp (-DUSE_OPENMP) will trigger additional
 * randomness due to computing reductions in parallel, and is not recommended
 * unless generating very large dither arrays.
 */

#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <stdio.h>

/* Booleans and utility functions */

#ifndef TRUE
#   define TRUE 1
#endif

#ifndef FALSE
#   define FALSE 0
#endif

typedef int bool_t;

int
imin (int x, int y)
{
    return x < y ? x : y;
}

/* Memory allocation */

/* Allocate a * b * c bytes, returning NULL if the product would overflow. */
void *
malloc_abc (unsigned int a, unsigned int b, unsigned int c)
{
    if (a >= INT32_MAX / b)
        return NULL;
    else if (a * b >= INT32_MAX / c)
        return NULL;
    else
        return malloc (a * b * c);
}

/* Random number generation: Marsaglia's xorwow generator.
 * State is 4 xorshift words plus a Weyl counter (state[4]). */

typedef uint32_t xorwow_state_t[5];

uint32_t
xorwow_next (xorwow_state_t *state)
{
    uint32_t s = (*state)[0],
             t = (*state)[3];

    (*state)[3] = (*state)[2];
    (*state)[2] = (*state)[1];
    (*state)[1] = s;

    t ^= t >> 2;
    t ^= t << 1;
    t ^= s ^ (s << 4);
    (*state)[0] = t;

    (*state)[4] += 362437;          /* Weyl sequence increment */

    return t + (*state)[4];
}

/* Uniform float in approximately [0, 1], built from the top 23 random bits. */
float
xorwow_float (xorwow_state_t *s)
{
    return (xorwow_next (s) >> 9) / (float)((1 << 23) - 1);
}

/* Floating point matrices
 *
 * Used to cache the cluster sizes.
 */

typedef struct matrix_t {
    int width;
    int height;
    float *buffer;
} matrix_t;

/* Initialize a width x height matrix; FALSE on NULL matrix or allocation failure. */
bool_t
matrix_init (matrix_t *matrix, int width, int height)
{
    float *buffer;

    if (!matrix)
        return FALSE;

    buffer = malloc_abc (width, height, sizeof (float));
    if (!buffer)
        return FALSE;

    matrix->buffer = buffer;
    matrix->width  = width;
    matrix->height = height;
    return TRUE;
}

/* Copy src into dst; both must already be initialized with equal dimensions. */
bool_t
matrix_copy (matrix_t *dst, matrix_t const *src)
{
    float *srcbuf = src->buffer,
          *srcend = src->buffer + src->width * src->height,
          *dstbuf = dst->buffer;

    if (dst->width != src->width || dst->height != src->height)
        return FALSE;

    while (srcbuf < srcend)
        *dstbuf++ = *srcbuf++;

    return TRUE;
}

float *
matrix_get (matrix_t *matrix, int x, int y)
{
    return &matrix->buffer[y * matrix->width + x];
}

void
matrix_destroy (matrix_t *matrix)
{
    free (matrix->buffer);
}

/* Binary patterns */

typedef struct pattern_t {
    int width;
    int height;
    bool_t *buffer;
} pattern_t;

bool_t
pattern_init (pattern_t *pattern, int width, int height)
{
    bool_t *buffer;

    if (!pattern)
        return FALSE;

    buffer = malloc_abc (width, height, sizeof (bool_t));
    if (!buffer)
        return FALSE;

    pattern->buffer = buffer;
    pattern->width  = width;
    pattern->height = height;
    return TRUE;
}

bool_t
pattern_copy (pattern_t *dst, pattern_t const *src)
{
    bool_t *srcbuf = src->buffer,
           *srcend = src->buffer + src->width * src->height,
           *dstbuf = dst->buffer;

    if (dst->width != src->width || dst->height != src->height)
        return FALSE;

    while (srcbuf < srcend)
        *dstbuf++ = *srcbuf++;

    return TRUE;
}

bool_t *
pattern_get (pattern_t *pattern, int x, int y)
{
    return &pattern->buffer[y * pattern->width + x];
}

/* Set each pixel TRUE with probability `fraction`, using PRNG state s. */
void
pattern_fill_white_noise (pattern_t *pattern, float fraction,
                          xorwow_state_t *s)
{
    bool_t *buffer = pattern->buffer;
    bool_t *end    = buffer + (pattern->width * pattern->height);

    while (buffer < end)
        *buffer++ = xorwow_float (s) < fraction;
}

void
pattern_destroy (pattern_t *pattern)
{
    free (pattern->buffer);
}

/* Dither arrays */

typedef struct array_t {
    int width;
    int height;
    uint32_t *buffer;
} array_t;

bool_t
array_init (array_t *array, int width, int height)
{
    uint32_t *buffer;

    if (!array)
        return FALSE;

    buffer = malloc_abc (width, height, sizeof (uint32_t));
    if (!buffer)
        return FALSE;

    array->buffer = buffer;
    array->width  = width;
    array->height = height;
    return TRUE;
}

uint32_t *
array_get (array_t *array, int x, int y)
{
    return &array->buffer[y * array->width + x];
}

/* Save the array as a binary PGM ("P5") image whose maxval is w*h-1, writing
 * 1 byte per sample when the maxval fits in 8 bits, 2 bytes otherwise.
 * Returns FALSE on any I/O failure. */
bool_t
array_save_ppm (array_t *array, const char *filename)
{
    FILE *f = fopen(filename, "wb");

    int i   = 0;
    int bpp = 2;
    uint8_t buffer[1024];

    if (!f)
        return FALSE;

    if (array->width * array->height - 1 < 256)
        bpp = 1;

    fprintf(f, "P5 %d %d %d\n", array->width, array->height,
            array->width * array->height - 1);
    while (i < array->width * array->height)
    {
        int j = 0;
        /* BUGFIX: bound each chunk by the number of REMAINING samples
         * (i + j < total), not by the total sample count alone; the old
         * bound read past the end of array->buffer in the final chunk
         * whenever width*height is not a multiple of the chunk size. */
        for (; j < 1024 / bpp && i + j < array->width * array->height; ++j)
        {
            uint32_t v = array->buffer[i + j];
            if (bpp == 2)
            {
                /* NOTE(review): bytes are emitted LSB-first here, but the PGM
                 * spec calls for the most significant byte first when
                 * maxval > 255 -- confirm before relying on external viewers. */
                buffer[2 * j]     = v & 0xff;
                buffer[2 * j + 1] = (v & 0xff00) >> 8;
            }
            else
            {
                buffer[j] = v;
            }
        }

        fwrite((void *)buffer, bpp, j, f);
        i += j;
    }

    if (fclose(f) != 0)
        return FALSE;

    return TRUE;
}

/* Save the array as a C header defining a uint16_t table, guarded by
 * BLUE_NOISE_<W>X<H>_H. Returns FALSE on any I/O failure. */
bool_t
array_save (array_t *array, const char *filename)
{
    int x, y;
    FILE *f = fopen(filename, "wb");

    if (!f)
        return FALSE;

    fprintf (f,
             "/* WARNING: This file is generated by make-blue-noise.c\n"
             " * Please edit that file instead of this one.\n"
             " */\n"
             "\n"
             "#ifndef BLUE_NOISE_%dX%d_H\n"
             "#define BLUE_NOISE_%dX%d_H\n"
             "\n"
             "#include <stdint.h>\n"
             "\n",
             array->width, array->height, array->width, array->height);

    fprintf (f, "static const uint16_t dither_blue_noise_%dx%d[%d] = {\n",
             array->width, array->height, array->width * array->height);

    for (y = 0; y < array->height; ++y)
    {
        fprintf (f, "    ");
        for (x = 0; x < array->width; ++x)
        {
            if (x != 0)
                fprintf (f, ", ");
            fprintf (f, "%d", *array_get (array, x, y));
        }
        fprintf (f, ",\n");
    }
    fprintf (f, "};\n");

    fprintf (f, "\n#endif /* BLUE_NOISE_%dX%d_H */\n",
             array->width, array->height);

    if (fclose(f) != 0)
        return FALSE;

    return TRUE;
}

void
array_destroy (array_t *array)
{
    free (array->buffer);
}

/* Dither array generation */

/* For every pixel, compute the Gaussian-weighted density of same-valued pixels
 * (toroidal wrap-around distance) and store it in `matrix`. */
bool_t
compute_cluster_sizes (pattern_t *pattern, matrix_t *matrix)
{
    int width  = pattern->width,
        height = pattern->height;

    if (matrix->width != width || matrix->height != height)
        return FALSE;

    int px, py, qx, qy, dx, dy;
    float tsqsi = 2.f * 1.5f * 1.5f;    /* 2 * sigma^2, sigma = 1.5 */

#ifdef USE_OPENMP
#pragma omp parallel for default (none) \
    private (py, px, qy, qx, dx, dy) \
    shared (height, width, pattern, matrix, tsqsi)
#endif
    for (py = 0; py < height; ++py)
    {
        for (px = 0; px < width; ++px)
        {
            bool_t pixel = *pattern_get (pattern, px, py);
            float dist   = 0.f;

            for (qx = 0; qx < width; ++qx)
            {
                /* toroidal (wrap-around) distance along x */
                dx = imin (abs (qx - px), width - abs (qx - px));
                dx = dx * dx;

                for (qy = 0; qy < height; ++qy)
                {
                    dy = imin (abs (qy - py), height - abs (qy - py));
                    dy = dy * dy;

                    dist += (pixel == *pattern_get (pattern, qx, qy))
                            * expf (- (dx + dy) / tsqsi);
                }
            }

            *matrix_get (matrix, px, py) = dist;
        }
    }

    return TRUE;
}

/* Toggle pixel (x, y) and incrementally update the cached cluster sizes,
 * avoiding a full recomputation. */
bool_t
swap_pixel (pattern_t *pattern, matrix_t *matrix, int x, int y)
{
    int width  = pattern->width,
        height = pattern->height;

    bool_t new;

    float f,
          dist  = 0.f,
          tsqsi = 2.f * 1.5f * 1.5f;

    int px, py, dx, dy;
    bool_t b;

    new = !*pattern_get (pattern, x, y);
    *pattern_get (pattern, x, y) = new;

    if (matrix->width != width || matrix->height != height)
        return FALSE;

#ifdef USE_OPENMP
#pragma omp parallel for reduction (+:dist) default (none) \
    private (px, py, dx, dy, b, f) \
    shared (x, y, width, height, pattern, matrix, new, tsqsi)
#endif
    for (py = 0; py < height; ++py)
    {
        dy = imin (abs (py - y), height - abs (py - y));
        dy = dy * dy;

        for (px = 0; px < width; ++px)
        {
            dx = imin (abs (px - x), width - abs (px - x));
            dx = dx * dx;

            b = (*pattern_get (pattern, px, py) == new);
            f = expf (- (dx + dy) / tsqsi);
            *matrix_get (matrix, px, py) += (2 * b - 1) * f;

            dist += b * f;
        }
    }

    *matrix_get (matrix, x, y) = dist;
    return TRUE;
}

/* Find the location of the tightest cluster (pixel == TRUE) or largest void
 * (pixel == FALSE): the pixel of value `pixel` with the largest cached
 * cluster-size metric. */
void
largest_cluster (pattern_t *pattern, matrix_t *matrix,
                 bool_t pixel, int *xmax, int *ymax)
{
    int width  = pattern->width,
        height = pattern->height;

    int x, y;

    float vmax = -INFINITY;

#ifdef USE_OPENMP
#pragma omp parallel default (none) \
    private (x, y) \
    shared (height, width, pattern, matrix, pixel, xmax, ymax, vmax)
#endif
    {
        int xbest = -1,
            ybest = -1;

#ifdef USE_OPENMP
        float vbest = -INFINITY;

#pragma omp for reduction (max: vmax) collapse (2)
#endif
        for (y = 0; y < height; ++y)
        {
            for (x = 0; x < width; ++x)
            {
                if (*pattern_get (pattern, x, y) != pixel)
                    continue;

                if (*matrix_get (matrix, x, y) > vmax)
                {
                    vmax = *matrix_get (matrix, x, y);
#ifdef USE_OPENMP
                    vbest = vmax;
#endif
                    xbest = x;
                    ybest = y;
                }
            }
        }

#ifdef USE_OPENMP
#pragma omp barrier
#pragma omp critical
        {
            /* the thread that found the global maximum publishes its location */
            if (vmax == vbest)
            {
                *xmax = xbest;
                *ymax = ybest;
            }
        }
#else
        *xmax = xbest;
        *ymax = ybest;
#endif
    }

    assert (vmax > -INFINITY);
}

/* Phase 0 of void-and-cluster: repeatedly move a pixel from the tightest
 * cluster to the largest void until they coincide, homogenizing the pattern. */
void
generate_initial_binary_pattern (pattern_t *pattern, matrix_t *matrix)
{
    int xcluster = 0,
        ycluster = 0,
        xvoid    = 0,
        yvoid    = 0;

    for (;;)
    {
        largest_cluster (pattern, matrix, TRUE, &xcluster, &ycluster);
        assert (*pattern_get (pattern, xcluster, ycluster) == TRUE);
        swap_pixel (pattern, matrix, xcluster, ycluster);

        largest_cluster (pattern, matrix, FALSE, &xvoid, &yvoid);
        assert (*pattern_get (pattern, xvoid, yvoid) == FALSE);
        swap_pixel (pattern, matrix, xvoid, yvoid);

        if (xcluster == xvoid && ycluster == yvoid)
            return;
    }
}

/* Phases 1-3 of void-and-cluster: rank every pixel of the prototype pattern,
 * producing the dither array. temp_pattern/temp_matrix are scratch copies
 * (the prototype and its matrix are left untouched). */
bool_t
generate_dither_array (array_t *array,
                       pattern_t const *prototype, matrix_t const *matrix,
                       pattern_t *temp_pattern, matrix_t *temp_matrix)
{
    int width  = prototype->width,
        height = prototype->height;

    int x, y, rank;

    int initial_rank = 0;

    if (array->width != width || array->height != height)
        return FALSE;

    // Make copies of the prototype and associated sizes matrix since we will
    // trash them
    if (!pattern_copy (temp_pattern, prototype))
        return FALSE;

    if (!matrix_copy (temp_matrix, matrix))
        return FALSE;

    // Compute initial rank
    for (y = 0; y < height; ++y)
    {
        for (x = 0; x < width; ++x)
        {
            if (*pattern_get (temp_pattern, x, y))
                initial_rank += 1;

            *array_get (array, x, y) = 0;
        }
    }

    // Phase 1
    for (rank = initial_rank; rank > 0; --rank)
    {
        largest_cluster (temp_pattern, temp_matrix, TRUE, &x, &y);
        swap_pixel (temp_pattern, temp_matrix, x, y);
        *array_get (array, x, y) = rank - 1;
    }

    // Make copies again for phases 2 & 3
    if (!pattern_copy (temp_pattern, prototype))
        return FALSE;

    if (!matrix_copy (temp_matrix, matrix))
        return FALSE;

    // Phase 2 & 3
    for (rank = initial_rank; rank < width * height; ++rank)
    {
        largest_cluster (temp_pattern, temp_matrix, FALSE, &x, &y);
        swap_pixel (temp_pattern, temp_matrix, x, y);
        *array_get (array, x, y) = rank;
    }

    return TRUE;
}

/* Allocate all working buffers, run the full void-and-cluster pipeline for a
 * size x size array, and save it as a C header (and optionally a PGM image).
 * Returns TRUE on success; all buffers are released on every path. */
bool_t
generate (int size, xorwow_state_t *s,
          char const *c_filename, char const *ppm_filename)
{
    bool_t ok = TRUE;
    pattern_t prototype, temp_pattern;
    array_t array;
    matrix_t matrix, temp_matrix;

    printf ("Generating %dx%d blue noise...\n", size, size);

    if (!pattern_init (&prototype, size, size))
        return FALSE;

    if (!pattern_init (&temp_pattern, size, size))
    {
        pattern_destroy (&prototype);
        return FALSE;
    }

    if (!matrix_init (&matrix, size, size))
    {
        pattern_destroy (&temp_pattern);
        pattern_destroy (&prototype);
        return FALSE;
    }

    if (!matrix_init (&temp_matrix, size, size))
    {
        matrix_destroy (&matrix);
        pattern_destroy (&temp_pattern);
        pattern_destroy (&prototype);
        return FALSE;
    }

    if (!array_init (&array, size, size))
    {
        matrix_destroy (&temp_matrix);
        matrix_destroy (&matrix);
        pattern_destroy (&temp_pattern);
        pattern_destroy (&prototype);
        return FALSE;
    }

    printf("Filling initial binary pattern with white noise...\n");
    pattern_fill_white_noise (&prototype, .1, s);

    printf("Initializing cluster sizes...\n");
    if (!compute_cluster_sizes (&prototype, &matrix))
    {
        fprintf (stderr, "Error while computing cluster sizes\n");
        ok = FALSE;
        goto out;
    }

    printf("Generating initial binary pattern...\n");
    generate_initial_binary_pattern (&prototype, &matrix);

    printf("Generating dither array...\n");
    if (!generate_dither_array (&array, &prototype, &matrix,
                                &temp_pattern, &temp_matrix))
    {
        fprintf (stderr, "Error while generating dither array\n");
        ok = FALSE;
        goto out;
    }

    printf("Saving dither array...\n");
    if (!array_save (&array, c_filename))
    {
        fprintf (stderr, "Error saving dither array\n");
        ok = FALSE;
        goto out;
    }

#if SAVE_PPM
    if (!array_save_ppm (&array, ppm_filename))
    {
        fprintf (stderr, "Error saving dither array PPM\n");
        ok = FALSE;
        goto out;
    }
#else
    (void)ppm_filename;
#endif

    printf("All done!\n");

out:
    array_destroy (&array);
    matrix_destroy (&temp_matrix);
    matrix_destroy (&matrix);
    pattern_destroy (&temp_pattern);
    pattern_destroy (&prototype);
    return ok;
}

int
main (void)
{
    /* fixed seed so the generated table is reproducible */
    xorwow_state_t s = {1185956906, 12385940, 983948, 349208051, 901842};

    if (!generate (64, &s, "blue-noise-64x64.h", "blue-noise-64x64.ppm"))
        return -1;

    return 0;
}
trmm.pluto_ancc.seq_par.c
/* PLuTo/CLooG auto-generated triangular matrix multiply (TRMM) benchmark:
 * B := alpha*A*B with A strictly upper-triangular above a unit diagonal.
 * N, REPS and alpha are compile-time macros supplied on the compiler command
 * line (-DN=... -DREPS=... -Dalpha=...). The kernel inside main() is
 * machine-generated tiled/unrolled code -- do not hand-edit it. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

/* +20 column padding to reduce cache-set conflicts between rows */
double A[N][N+20];
double B[N][N+20];

/* Fill B with values in 1..5; make A upper-triangular (1..5 above the
 * diagonal, 1 on it, -1 below -- the sub-diagonal entries are never read by
 * the TRMM kernel). */
void init_arrays()
{
  int i,j;
  for (i=0; i<N; i++)
    for (j=0; j<N; j++) {
      B[i][j] = (i+j) % 5 + 1;
      if (i < j)
        A[i][j] = (i+j) % 5 + 1;
      else if (i == j)
        A[i][j] = 1;
      else
        A[i][j] = -1;
    }
}

/* Dump B (rounded) to stderr for result verification. */
void print_array()
{
  int i, j;
  for (i=0; i<N; i++) {
    for (j=0; j<N; j++) {
      fprintf(stderr, "%lf ", round(B[i][j]));
      if (j%80 == 79) fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
  }
  fprintf(stderr, "\n");
}

/* Wall-clock time in seconds via gettimeofday.
 * NOTE(review): `stat` is unused and the gettimeofday return value is
 * ignored -- presumably the generated harness once checked it. */
double rtclock()
{
  struct timezone tzp;
  struct timeval tp;
  int stat;
  gettimeofday (&tp, &tzp);
  return (tp.tv_sec + tp.tv_usec*1.0e-6);
}

/* Time REPS runs of the generated TRMM kernel and print the mean seconds per
 * run; returns (int)B[0][0] so the computation cannot be optimized away. */
int main()
{
  init_arrays();

  double annot_t_start=0, annot_t_end=0, annot_t_total=0;
  int annot_i;

  for (annot_i=0; annot_i<REPS; annot_i++)
  {
    annot_t_start = rtclock();

    register int i,j,k,t;
    register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t;
    register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6, newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12;
    register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6, newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12;

#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

    int c1, c2, c3, c4, c5, c6, c7, c8, c9;
    register int lb, ub, lb1, ub1, lb2, ub2;
/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.26s. */
    /* c1..c5: tile loops; c8t/c9t: 11x/8x unroll-and-jam over rows/columns;
     * c7: innermost vectorized loop; scv_* are scalar replacements of B. */
    lb1=0;
    ub1=floord(N-1,1024);
#pragma omp parallel for shared(lb1,ub1) private(c1,c2,c3,c4,c5,c6,c7,c8,c9)
    for (c1=lb1; c1<=ub1; c1++) {
      {
        for (c2=0; c2<=floord(N-2,512); c2++ ) {
          for (c3=max(0,ceild(256*c2-127,128)); c3<=floord(N-1,256); c3++ ) {
            for (c4=max(8*c1,0); c4<=min(floord(N-1,128),8*c1+7); c4++ ) {
              for (c5=max(0,4*c2); c5<=min(min(4*c2+3,floord(N-2,128)),floord(128*c3+127,64)); c5++ ) {
                /* main unrolled block: 11 consecutive rows (c8t..c8t+10) at a time */
                for (c8t=max(0,128*c5); c8t<=min(min(128*c5+127,N-2),256*c3+254)-10; c8t=c8t+11) {
                  newlb_c9=-2147483648;
                  newub_c9=min(N-1,256*c3+255);
                  register int cbv_1;
                  cbv_1=c8t+10;
#pragma ivdep
#pragma vector always
                  for (c8=c8t; c8<=cbv_1; c8=c8+1) {
                    newlb_c9=max(newlb_c9,max(c8+1,256*c3));
                  }
                  /* prologue: triangular fringe where c9 bounds differ per row */
                  for (c8=c8t; c8<=c8t+10; c8=c8+1) {
                    for (c9=max(c8+1,256*c3); c9<=newlb_c9-1; c9=c9+1) {
                      register int cbv_2, cbv_3;
                      cbv_2=max(0,128*c4);
                      cbv_3=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
                      for (c7=cbv_2; c7<=cbv_3; c7++ ) {
                        double scv_1;
                        scv_1=B[c8][c7];
                        scv_1=alpha*A[c8][c9]*B[c9][c7]+scv_1;
                        B[c8][c7]=scv_1;
                      }
                    }
                  }
                  /* steady state: 11 rows x 8 c9 terms fully unrolled */
                  for (c9t=newlb_c9; c9t<=newub_c9-7; c9t=c9t+8) {
                    register int cbv_4, cbv_5;
                    cbv_4=max(0,128*c4);
                    cbv_5=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
                    for (c7=cbv_4; c7<=cbv_5; c7++ ) {
                      double scv_2, scv_3, scv_4, scv_5, scv_6, scv_7, scv_8, scv_9;
                      double scv_10, scv_11, scv_12, scv_13, scv_14, scv_15, scv_16, scv_17;
                      double scv_18, scv_19, scv_20;
                      scv_2=B[(c9t+1)][c7];
                      scv_3=B[(c9t+2)][c7];
                      scv_4=B[(c9t+3)][c7];
                      scv_5=B[(c9t+6)][c7];
                      scv_6=B[(c8t+8)][c7];
                      scv_7=B[(c8t+2)][c7];
                      scv_8=B[(c8t+4)][c7];
                      scv_9=B[(c8t+1)][c7];
                      scv_10=B[(c8t+10)][c7];
                      scv_11=B[(c9t+5)][c7];
                      scv_12=B[(c8t+5)][c7];
                      scv_13=B[c9t][c7];
                      scv_14=B[(c9t+7)][c7];
                      scv_15=B[(c8t+9)][c7];
                      scv_16=B[(c8t+7)][c7];
                      scv_17=B[(c9t+4)][c7];
                      scv_18=B[c8t][c7];
                      scv_19=B[(c8t+3)][c7];
                      scv_20=B[(c8t+6)][c7];
                      scv_18=alpha*A[c8t][c9t]*scv_13+scv_18;
                      scv_18=alpha*A[c8t][(c9t+1)]*scv_2+scv_18;
                      scv_18=alpha*A[c8t][(c9t+2)]*scv_3+scv_18;
                      scv_18=alpha*A[c8t][(c9t+3)]*scv_4+scv_18;
                      scv_18=alpha*A[c8t][(c9t+4)]*scv_17+scv_18;
                      scv_18=alpha*A[c8t][(c9t+5)]*scv_11+scv_18;
                      scv_18=alpha*A[c8t][(c9t+6)]*scv_5+scv_18;
                      scv_18=alpha*A[c8t][(c9t+7)]*scv_14+scv_18;
                      scv_9=alpha*A[(c8t+1)][c9t]*scv_13+scv_9;
                      scv_9=alpha*A[(c8t+1)][(c9t+1)]*scv_2+scv_9;
                      scv_9=alpha*A[(c8t+1)][(c9t+2)]*scv_3+scv_9;
                      scv_9=alpha*A[(c8t+1)][(c9t+3)]*scv_4+scv_9;
                      scv_9=alpha*A[(c8t+1)][(c9t+4)]*scv_17+scv_9;
                      scv_9=alpha*A[(c8t+1)][(c9t+5)]*scv_11+scv_9;
                      scv_9=alpha*A[(c8t+1)][(c9t+6)]*scv_5+scv_9;
                      scv_9=alpha*A[(c8t+1)][(c9t+7)]*scv_14+scv_9;
                      scv_7=alpha*A[(c8t+2)][c9t]*scv_13+scv_7;
                      scv_7=alpha*A[(c8t+2)][(c9t+1)]*scv_2+scv_7;
                      scv_7=alpha*A[(c8t+2)][(c9t+2)]*scv_3+scv_7;
                      scv_7=alpha*A[(c8t+2)][(c9t+3)]*scv_4+scv_7;
                      scv_7=alpha*A[(c8t+2)][(c9t+4)]*scv_17+scv_7;
                      scv_7=alpha*A[(c8t+2)][(c9t+5)]*scv_11+scv_7;
                      scv_7=alpha*A[(c8t+2)][(c9t+6)]*scv_5+scv_7;
                      scv_7=alpha*A[(c8t+2)][(c9t+7)]*scv_14+scv_7;
                      scv_19=alpha*A[(c8t+3)][c9t]*scv_13+scv_19;
                      scv_19=alpha*A[(c8t+3)][(c9t+1)]*scv_2+scv_19;
                      scv_19=alpha*A[(c8t+3)][(c9t+2)]*scv_3+scv_19;
                      scv_19=alpha*A[(c8t+3)][(c9t+3)]*scv_4+scv_19;
                      scv_19=alpha*A[(c8t+3)][(c9t+4)]*scv_17+scv_19;
                      scv_19=alpha*A[(c8t+3)][(c9t+5)]*scv_11+scv_19;
                      scv_19=alpha*A[(c8t+3)][(c9t+6)]*scv_5+scv_19;
                      scv_19=alpha*A[(c8t+3)][(c9t+7)]*scv_14+scv_19;
                      scv_8=alpha*A[(c8t+4)][c9t]*scv_13+scv_8;
                      scv_8=alpha*A[(c8t+4)][(c9t+1)]*scv_2+scv_8;
                      scv_8=alpha*A[(c8t+4)][(c9t+2)]*scv_3+scv_8;
                      scv_8=alpha*A[(c8t+4)][(c9t+3)]*scv_4+scv_8;
                      scv_8=alpha*A[(c8t+4)][(c9t+4)]*scv_17+scv_8;
                      scv_8=alpha*A[(c8t+4)][(c9t+5)]*scv_11+scv_8;
                      scv_8=alpha*A[(c8t+4)][(c9t+6)]*scv_5+scv_8;
                      scv_8=alpha*A[(c8t+4)][(c9t+7)]*scv_14+scv_8;
                      scv_12=alpha*A[(c8t+5)][c9t]*scv_13+scv_12;
                      scv_12=alpha*A[(c8t+5)][(c9t+1)]*scv_2+scv_12;
                      scv_12=alpha*A[(c8t+5)][(c9t+2)]*scv_3+scv_12;
                      scv_12=alpha*A[(c8t+5)][(c9t+3)]*scv_4+scv_12;
                      scv_12=alpha*A[(c8t+5)][(c9t+4)]*scv_17+scv_12;
                      scv_12=alpha*A[(c8t+5)][(c9t+5)]*scv_11+scv_12;
                      scv_12=alpha*A[(c8t+5)][(c9t+6)]*scv_5+scv_12;
                      scv_12=alpha*A[(c8t+5)][(c9t+7)]*scv_14+scv_12;
                      scv_20=alpha*A[(c8t+6)][c9t]*scv_13+scv_20;
                      scv_20=alpha*A[(c8t+6)][(c9t+1)]*scv_2+scv_20;
                      scv_20=alpha*A[(c8t+6)][(c9t+2)]*scv_3+scv_20;
                      scv_20=alpha*A[(c8t+6)][(c9t+3)]*scv_4+scv_20;
                      scv_20=alpha*A[(c8t+6)][(c9t+4)]*scv_17+scv_20;
                      scv_20=alpha*A[(c8t+6)][(c9t+5)]*scv_11+scv_20;
                      scv_20=alpha*A[(c8t+6)][(c9t+6)]*scv_5+scv_20;
                      scv_20=alpha*A[(c8t+6)][(c9t+7)]*scv_14+scv_20;
                      scv_16=alpha*A[(c8t+7)][c9t]*scv_13+scv_16;
                      scv_16=alpha*A[(c8t+7)][(c9t+1)]*scv_2+scv_16;
                      scv_16=alpha*A[(c8t+7)][(c9t+2)]*scv_3+scv_16;
                      scv_16=alpha*A[(c8t+7)][(c9t+3)]*scv_4+scv_16;
                      scv_16=alpha*A[(c8t+7)][(c9t+4)]*scv_17+scv_16;
                      scv_16=alpha*A[(c8t+7)][(c9t+5)]*scv_11+scv_16;
                      scv_16=alpha*A[(c8t+7)][(c9t+6)]*scv_5+scv_16;
                      scv_16=alpha*A[(c8t+7)][(c9t+7)]*scv_14+scv_16;
                      scv_6=alpha*A[(c8t+8)][c9t]*scv_13+scv_6;
                      scv_6=alpha*A[(c8t+8)][(c9t+1)]*scv_2+scv_6;
                      scv_6=alpha*A[(c8t+8)][(c9t+2)]*scv_3+scv_6;
                      scv_6=alpha*A[(c8t+8)][(c9t+3)]*scv_4+scv_6;
                      scv_6=alpha*A[(c8t+8)][(c9t+4)]*scv_17+scv_6;
                      scv_6=alpha*A[(c8t+8)][(c9t+5)]*scv_11+scv_6;
                      scv_6=alpha*A[(c8t+8)][(c9t+6)]*scv_5+scv_6;
                      scv_6=alpha*A[(c8t+8)][(c9t+7)]*scv_14+scv_6;
                      scv_15=alpha*A[(c8t+9)][c9t]*scv_13+scv_15;
                      scv_15=alpha*A[(c8t+9)][(c9t+1)]*scv_2+scv_15;
                      scv_15=alpha*A[(c8t+9)][(c9t+2)]*scv_3+scv_15;
                      scv_15=alpha*A[(c8t+9)][(c9t+3)]*scv_4+scv_15;
                      scv_15=alpha*A[(c8t+9)][(c9t+4)]*scv_17+scv_15;
                      scv_15=alpha*A[(c8t+9)][(c9t+5)]*scv_11+scv_15;
                      scv_15=alpha*A[(c8t+9)][(c9t+6)]*scv_5+scv_15;
                      scv_15=alpha*A[(c8t+9)][(c9t+7)]*scv_14+scv_15;
                      scv_10=alpha*A[(c8t+10)][c9t]*scv_13+scv_10;
                      scv_10=alpha*A[(c8t+10)][(c9t+1)]*scv_2+scv_10;
                      scv_10=alpha*A[(c8t+10)][(c9t+2)]*scv_3+scv_10;
                      scv_10=alpha*A[(c8t+10)][(c9t+3)]*scv_4+scv_10;
                      scv_10=alpha*A[(c8t+10)][(c9t+4)]*scv_17+scv_10;
                      scv_10=alpha*A[(c8t+10)][(c9t+5)]*scv_11+scv_10;
                      scv_10=alpha*A[(c8t+10)][(c9t+6)]*scv_5+scv_10;
                      scv_10=alpha*A[(c8t+10)][(c9t+7)]*scv_14+scv_10;
                      B[(c8t+8)][c7]=scv_6;
                      B[(c8t+2)][c7]=scv_7;
                      B[(c8t+4)][c7]=scv_8;
                      B[(c8t+1)][c7]=scv_9;
                      B[(c8t+10)][c7]=scv_10;
                      B[(c8t+5)][c7]=scv_12;
                      B[(c8t+9)][c7]=scv_15;
                      B[(c8t+7)][c7]=scv_16;
                      B[c8t][c7]=scv_18;
                      B[(c8t+3)][c7]=scv_19;
                      B[(c8t+6)][c7]=scv_20;
                    }
                  }
                  /* epilogue: remaining c9 iterations for the 11-row band */
                  for (c9=c9t; c9<=newub_c9; c9=c9+1) {
                    register int cbv_6, cbv_7;
                    cbv_6=max(0,128*c4);
                    cbv_7=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
                    for (c7=cbv_6; c7<=cbv_7; c7++ ) {
                      double scv_21, scv_22, scv_23, scv_24, scv_25, scv_26, scv_27, scv_28;
                      double scv_29, scv_30, scv_31, scv_32;
                      scv_21=B[(c8t+7)][c7];
                      scv_22=B[c8t][c7];
                      scv_23=B[(c8t+10)][c7];
                      scv_24=B[(c8t+5)][c7];
                      scv_25=B[(c8t+8)][c7];
                      scv_26=B[(c8t+3)][c7];
                      scv_27=B[(c8t+6)][c7];
                      scv_28=B[(c8t+2)][c7];
                      scv_29=B[(c8t+1)][c7];
                      scv_30=B[c9][c7];
                      scv_31=B[(c8t+9)][c7];
                      scv_32=B[(c8t+4)][c7];
                      scv_22=alpha*A[c8t][c9]*scv_30+scv_22;
                      scv_29=alpha*A[(c8t+1)][c9]*scv_30+scv_29;
                      scv_28=alpha*A[(c8t+2)][c9]*scv_30+scv_28;
                      scv_26=alpha*A[(c8t+3)][c9]*scv_30+scv_26;
                      scv_32=alpha*A[(c8t+4)][c9]*scv_30+scv_32;
                      scv_24=alpha*A[(c8t+5)][c9]*scv_30+scv_24;
                      scv_27=alpha*A[(c8t+6)][c9]*scv_30+scv_27;
                      scv_21=alpha*A[(c8t+7)][c9]*scv_30+scv_21;
                      scv_25=alpha*A[(c8t+8)][c9]*scv_30+scv_25;
                      scv_31=alpha*A[(c8t+9)][c9]*scv_30+scv_31;
                      scv_23=alpha*A[(c8t+10)][c9]*scv_30+scv_23;
                      B[(c8t+7)][c7]=scv_21;
                      B[c8t][c7]=scv_22;
                      B[(c8t+10)][c7]=scv_23;
                      B[(c8t+5)][c7]=scv_24;
                      B[(c8t+8)][c7]=scv_25;
                      B[(c8t+3)][c7]=scv_26;
                      B[(c8t+6)][c7]=scv_27;
                      B[(c8t+2)][c7]=scv_28;
                      B[(c8t+1)][c7]=scv_29;
                      B[(c8t+9)][c7]=scv_31;
                      B[(c8t+4)][c7]=scv_32;
                    }
                  }
                  for (c8=c8t; c8<=c8t+10; c8=c8+1) {
                    for (c9=newub_c9+1; c9<=min(N-1,256*c3+255); c9=c9+1) {
                      register int cbv_8, cbv_9;
                      cbv_8=max(0,128*c4);
                      cbv_9=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
                      for (c7=cbv_8; c7<=cbv_9; c7++ ) {
                        double scv_33;
                        scv_33=B[c8][c7];
                        scv_33=alpha*A[c8][c9]*B[c9][c7]+scv_33;
                        B[c8][c7]=scv_33;
                      }
                    }
                  }
                }
                /* residual rows that did not fill a full 11-row band */
                for (c8=c8t; c8<=min(min(128*c5+127,N-2),256*c3+254); c8=c8+1) {
                  for (c9t=max(c8+1,256*c3); c9t<=min(N-1,256*c3+255)-7; c9t=c9t+8) {
                    register int cbv_10, cbv_11;
                    cbv_10=max(0,128*c4);
                    cbv_11=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
                    for (c7=cbv_10; c7<=cbv_11; c7++ ) {
                      double scv_34;
                      scv_34=B[c8][c7];
                      scv_34=alpha*A[c8][c9t]*B[c9t][c7]+scv_34;
                      scv_34=alpha*A[c8][(c9t+1)]*B[(c9t+1)][c7]+scv_34;
                      scv_34=alpha*A[c8][(c9t+2)]*B[(c9t+2)][c7]+scv_34;
                      scv_34=alpha*A[c8][(c9t+3)]*B[(c9t+3)][c7]+scv_34;
                      scv_34=alpha*A[c8][(c9t+4)]*B[(c9t+4)][c7]+scv_34;
                      scv_34=alpha*A[c8][(c9t+5)]*B[(c9t+5)][c7]+scv_34;
                      scv_34=alpha*A[c8][(c9t+6)]*B[(c9t+6)][c7]+scv_34;
                      scv_34=alpha*A[c8][(c9t+7)]*B[(c9t+7)][c7]+scv_34;
                      B[c8][c7]=scv_34;
                    }
                  }
                  for (c9=c9t; c9<=min(N-1,256*c3+255); c9=c9+1) {
                    register int cbv_12, cbv_13;
                    cbv_12=max(0,128*c4);
                    cbv_13=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
                    for (c7=cbv_12; c7<=cbv_13; c7++ ) {
                      double scv_35;
                      scv_35=B[c8][c7];
                      scv_35=alpha*A[c8][c9]*B[c9][c7]+scv_35;
                      B[c8][c7]=scv_35;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
/* End of CLooG code */

    annot_t_end = rtclock();
    annot_t_total += annot_t_end - annot_t_start;
  }

  annot_t_total = annot_t_total / REPS;
  printf("%f\n", annot_t_total);
  //print_array();
  return ((int) B[0][0]);
}
test_nvector_openmpdev.c
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the OpenMP 4.5 NVECTOR * module implementation. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <sundials/sundials_types.h> #include <nvector/nvector_openmpdev.h> #include <sundials/sundials_math.h> #include "test_nvector.h" #include <omp.h> /* OpenMPDEV vector specific tests */ int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid); /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { int fails = 0; /* counter for test failures */ int retval; /* function return value */ sunindextype length; /* vector length */ N_Vector U, V, W, X, Y, Z; /* test vectors */ int print_timing; /* turn timing on/off */ Test_Init(NULL); /* check input and set vector length */ if (argc < 3){ printf("ERROR: TWO (2) Inputs required: vector length and print timing \n"); Test_Abort(-1); } length = (sunindextype) atol(argv[1]); if (length <= 0) { printf("ERROR: length of vector must be a positive integer \n"); Test_Abort(-1); } print_timing = atoi(argv[2]); SetTiming(print_timing, 0); printf("Testing the OpenMPDEV N_Vector \n"); printf("Vector length %ld \n", (long int) length); printf("\n omp_get_default_device = %d \n", omp_get_default_device()); printf("\n omp_get_num_devices = %d \n", 
omp_get_num_devices()); printf("\n omp_get_initial_device = %d \n", omp_get_initial_device()); printf("\n omp_is_initial_device = %d \n", omp_is_initial_device()); /* Create new vectors */ W = N_VNewEmpty_OpenMPDEV(length, sunctx); if (W == NULL) { printf("FAIL: Unable to create a new empty vector \n\n"); Test_Abort(1); } X = N_VNew_OpenMPDEV(length, sunctx); if (X == NULL) { N_VDestroy(W); printf("FAIL: Unable to create a new vector \n\n"); Test_Abort(1); } /* Check vector ID */ fails += Test_N_VGetVectorID(X, SUNDIALS_NVEC_OPENMPDEV, 0); /* Check vector length */ fails += Test_N_VGetLength(X, 0); /* Check vector communicator */ fails += Test_N_VGetCommunicator(X, NULL, 0); /* Test clone functions */ fails += Test_N_VCloneEmpty(X, 0); fails += Test_N_VClone(X, length, 0); fails += Test_N_VCloneEmptyVectorArray(5, X, 0); fails += Test_N_VCloneVectorArray(5, X, length, 0); /* Clone additional vectors for testing */ Y = N_VClone(X); if (Y == NULL) { N_VDestroy(W); N_VDestroy(X); printf("FAIL: Unable to create a new vector \n\n"); Test_Abort(1); } Z = N_VClone(X); if (Z == NULL) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); printf("FAIL: Unable to create a new vector \n\n"); Test_Abort(1); } /* Standard vector operation tests */ printf("\nTesting standard vector operations:\n\n"); fails += Test_N_VConst(X, length, 0); fails += Test_N_VLinearSum(X, Y, Z, length, 0); fails += Test_N_VProd(X, Y, Z, length, 0); fails += Test_N_VDiv(X, Y, Z, length, 0); fails += Test_N_VScale(X, Z, length, 0); fails += Test_N_VAbs(X, Z, length, 0); fails += Test_N_VInv(X, Z, length, 0); fails += Test_N_VAddConst(X, Z, length, 0); fails += Test_N_VDotProd(X, Y, length, 0); fails += Test_N_VMaxNorm(X, length, 0); fails += Test_N_VWrmsNorm(X, Y, length, 0); fails += Test_N_VWrmsNormMask(X, Y, Z, length, 0); fails += Test_N_VMin(X, length, 0); fails += Test_N_VWL2Norm(X, Y, length, 0); fails += Test_N_VL1Norm(X, length, 0); fails += Test_N_VCompare(X, Z, length, 0); fails += 
Test_N_VInvTest(X, Z, length, 0); fails += Test_N_VConstrMask(X, Y, Z, length, 0); fails += Test_N_VMinQuotient(X, Y, length, 0); /* Fused and vector array operations tests (disabled) */ printf("\nTesting fused and vector array operations (disabled):\n\n"); /* create vector and disable all fused and vector array operations */ U = N_VNew_OpenMPDEV(length, sunctx); retval = N_VEnableFusedOps_OpenMPDEV(U, SUNFALSE); if (U == NULL || retval != 0) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); printf("FAIL: Unable to create a new vector \n\n"); Test_Abort(1); } /* fused operations */ fails += Test_N_VLinearCombination(U, length, 0); fails += Test_N_VScaleAddMulti(U, length, 0); fails += Test_N_VDotProdMulti(U, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(U, length, 0); fails += Test_N_VScaleVectorArray(U, length, 0); fails += Test_N_VConstVectorArray(U, length, 0); fails += Test_N_VWrmsNormVectorArray(U, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(U, length, 0); fails += Test_N_VScaleAddMultiVectorArray(U, length, 0); fails += Test_N_VLinearCombinationVectorArray(U, length, 0); /* Fused and vector array operations tests (enabled) */ printf("\nTesting fused and vector array operations (enabled):\n\n"); /* create vector and enable all fused and vector array operations */ V = N_VNew_OpenMPDEV(length, sunctx); retval = N_VEnableFusedOps_OpenMPDEV(V, SUNTRUE); if (V == NULL || retval != 0) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); printf("FAIL: Unable to create a new vector \n\n"); Test_Abort(1); } /* fused operations */ fails += Test_N_VLinearCombination(V, length, 0); fails += Test_N_VScaleAddMulti(V, length, 0); fails += Test_N_VDotProdMulti(V, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(V, length, 0); fails += Test_N_VScaleVectorArray(V, length, 0); fails += Test_N_VConstVectorArray(V, length, 0); fails += Test_N_VWrmsNormVectorArray(V, 
length, 0); fails += Test_N_VWrmsNormMaskVectorArray(V, length, 0); fails += Test_N_VScaleAddMultiVectorArray(V, length, 0); fails += Test_N_VLinearCombinationVectorArray(V, length, 0); /* local reduction operations */ printf("\nTesting local reduction operations:\n\n"); fails += Test_N_VDotProdLocal(X, Y, length, 0); fails += Test_N_VMaxNormLocal(X, length, 0); fails += Test_N_VMinLocal(X, length, 0); fails += Test_N_VL1NormLocal(X, length, 0); fails += Test_N_VWSqrSumLocal(X, Y, length, 0); fails += Test_N_VWSqrSumMaskLocal(X, Y, Z, length, 0); fails += Test_N_VInvTestLocal(X, Z, length, 0); fails += Test_N_VConstrMaskLocal(X, Y, Z, length, 0); fails += Test_N_VMinQuotientLocal(X, Y, length, 0); /* local fused reduction operations */ printf("\nTesting local fused reduction operations:\n\n"); fails += Test_N_VDotProdMultiLocal(V, length, 0); /* Free vectors */ N_VDestroy(U); N_VDestroy(V); N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); /* Print result */ if (fails) { printf("FAIL: NVector module failed %i tests \n\n", fails); } else { printf("SUCCESS: NVector module passed all tests \n\n"); } Test_Finalize(); return(fails); } /* ---------------------------------------------------------------------- * OpenMPDEV specific tests * --------------------------------------------------------------------*/ /* -------------------------------------------------------------------- * Test for the CUDA N_Vector N_VMake_OpenMPDEV function. Requires N_VConst * to check data. 
*/ int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid) { int failure = 0; realtype *h_data, *d_data; N_Vector Y; N_VConst(NEG_HALF, X); N_VCopyFromDevice_OpenMPDEV(X); h_data = N_VGetHostArrayPointer_OpenMPDEV(X); d_data = N_VGetDeviceArrayPointer_OpenMPDEV(X); /* Case 1: h_data and d_data are not null */ Y = N_VMake_OpenMPDEV(length, h_data, d_data); if (Y == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector is NULL \n \n"); return(1); } if (N_VGetHostArrayPointer_OpenMPDEV(Y) == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector host data == NULL \n \n"); N_VDestroy(Y); return(1); } if (N_VGetDeviceArrayPointer_OpenMPDEV(Y) == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector device data -= NULL \n \n"); N_VDestroy(Y); return(1); } failure += check_ans(NEG_HALF, Y, length); if (failure) { printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 1, Proc %d \n", myid); printf(" Failed N_VConst check \n \n"); N_VDestroy(Y); return(1); } if (myid == 0) { printf("PASSED test -- N_VMake_OpenMPDEV Case 1 \n"); } N_VDestroy(Y); /* Case 2: data is null */ Y = N_VMake_OpenMPDEV(length, NULL, NULL); if (Y != NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 2, Proc %d \n", myid); printf(" Vector is not NULL \n \n"); return(1); } if (myid == 0) { printf("PASSED test -- N_VMake_OpenMPDEV Case 2 \n"); } N_VDestroy(Y); return(failure); } /* ---------------------------------------------------------------------- * Implementation specific utility functions for vector tests * --------------------------------------------------------------------*/ int check_ans(realtype ans, N_Vector X, sunindextype local_length) { int failure = 0; sunindextype i; realtype *Xdata; N_VCopyFromDevice_OpenMPDEV(X); Xdata = N_VGetHostArrayPointer_OpenMPDEV(X); /* check vector data */ for (i = 0; i < local_length; i++) { failure += SUNRCompare(Xdata[i], ans); } return 
(failure > ZERO) ? (1) : (0); } booleantype has_data(N_Vector X) { realtype *Xdata = N_VGetHostArrayPointer_OpenMPDEV(X); if (Xdata == NULL) return SUNFALSE; else return SUNTRUE; } void set_element(N_Vector X, sunindextype i, realtype val) { set_element_range(X, i, i, val); } void set_element_range(N_Vector X, sunindextype is, sunindextype ie, realtype val) { realtype *xdev; int dev; sunindextype i; xdev = N_VGetDeviceArrayPointer_OpenMPDEV(X); dev = omp_get_default_device(); /* set elements [is,ie] of the data array */ #pragma omp target map(to:is,ie,val) is_device_ptr(xdev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) { for(i = is; i <= ie; i++) xdev[i] = val; } } realtype get_element(N_Vector X, sunindextype i) { realtype *data; N_VCopyFromDevice_OpenMPDEV(X); data = N_VGetHostArrayPointer_OpenMPDEV(X); return data[i]; } double max_time(N_Vector X, double time) { /* not running in parallel, just return input time */ return(time); } void sync_device(N_Vector x) { /* not running on DEV, just return */ return; }
reconstruction.c
/*---------------------------------------------------------------------------------
 *
 * RECONSTRUCTION.C
 *
 * -Linear, WENO and MP5 reconstruction algorithms
 *
 * All reconstructors share a 5-point-stencil interface (x1..x5) producing the
 * left and right face values around the central zone x3.
 *
 *---------------------------------------------------------------------------------*/

#include "decs.h"

/* Select the reconstruction routine at compile time. */
#if RECONSTRUCTION == LINEAR
#define RECON_ALGO linear_mc
#elif RECONSTRUCTION == WENO
#define RECON_ALGO weno
#elif RECONSTRUCTION == MP5
#define RECON_ALGO mp5
#else
#error "Reconstruction not specified!"
#endif

// Sanity checks: 5-point stencils need at least 3 ghost zones
#if (RECONSTRUCTION == WENO || RECONSTRUCTION == MP5) && NG < 3
#error "not enough ghost zones! PPM/WENO/MP5 + NG < 3\n"
#endif

void linear_mc(double unused1, double x1, double x2, double x3,
               double unused2, double *lout, double *rout);
void weno(double x1, double x2, double x3, double x4, double x5,
          double *lout, double *rout);
double median(double a, double b, double c);
double mp5_subcalc(double Fjm2, double Fjm1, double Fj, double Fjp1,
                   double Fjp2);
void mp5(double x1, double x2, double x3, double x4, double x5,
         double *lout, double *rout);

// Linear reconstruction with MC slope limiter.
// unused1/unused2 only pad the signature so all RECON_ALGO variants can be
// called with the same 5-point stencil; linear only needs the middle 3 points.
inline void linear_mc(double unused1, double x1, double x2, double x3,
                      double unused2, double *lout, double *rout)
{
  double Dqm,Dqp,Dqc,s;

  Dqm = 2. * (x2 - x1);     // backward difference (limited)
  Dqp = 2. * (x3 - x2);     // forward difference (limited)
  Dqc = 0.5 * (x3 - x1);    // centered difference

  s = Dqm * Dqp;

  // Opposite-sign one-sided slopes -> local extremum -> zero slope;
  // otherwise pick the smallest-magnitude candidate.
  if (s <= 0.)
    s = 0.;
  else {
    if (fabs(Dqm) < fabs(Dqp) && fabs(Dqm) < fabs(Dqc))
      s = Dqm;
    else if (fabs(Dqp) < fabs(Dqc))
      s = Dqp;
    else
      s = Dqc;
  }

  // Reconstruct left, right
  *lout = x2 - 0.5*s;
  *rout = x2 + 0.5*s;
}

// WENO interpolation. See Tchekhovskoy et al. 2007 (T07), Shu 2011 (S11)
// Implemented by Monika Moscibrodzka
inline void weno(double x1, double x2, double x3, double x4, double x5,
                 double *lout, double *rout)
{
  // Candidate stencil interpolants, S11 1, 2, 3
  double vr[3], vl[3];
  vr[0] = (3./8.)*x1 - (5./4.)*x2 + (15./8.)*x3;
  vr[1] = (-1./8.)*x2 + (3./4.)*x3 + (3./8.)*x4;
  vr[2] = (3./8.)*x3 + (3./4.)*x4 - (1./8.)*x5;

  // Left interpolants are the mirror image of the right ones
  vl[0] = (3./8.)*x5 - (5./4.)*x4 + (15./8.)*x3;
  vl[1] = (-1./8.)*x4 + (3./4.)*x3 + (3./8.)*x2;
  vl[2] = (3./8.)*x3 + (3./4.)*x2 - (1./8.)*x1;

  // Smoothness indicators, T07 A18 or S11 8
  double beta[3];
  beta[0] = (13./12.)*pow(x1 - 2.*x2 + x3, 2) +
            (1./4.)*pow(x1 - 4.*x2 + 3.*x3, 2);
  beta[1] = (13./12.)*pow(x2 - 2.*x3 + x4, 2) +
            (1./4.)*pow(x4 - x2, 2);
  beta[2] = (13./12.)*pow(x3 - 2.*x4 + x5, 2) +
            (1./4.)*pow(x5 - 4.*x4 + 3.*x3, 2);

  // Nonlinear weights S11 9; eps keeps the denominators nonzero
  double den, wtr[3], Wr, wr[3], wtl[3], Wl, wl[3], eps;
  eps=1.e-26;

  den = eps + beta[0]; den *= den; wtr[0] = (1./16.)/den;
  den = eps + beta[1]; den *= den; wtr[1] = (5./8. )/den;
  den = eps + beta[2]; den *= den; wtr[2] = (5./16.)/den;
  Wr = wtr[0] + wtr[1] + wtr[2];
  wr[0] = wtr[0]/Wr ;
  wr[1] = wtr[1]/Wr ;
  wr[2] = wtr[2]/Wr ;

  // Left weights reuse beta in reverse order (mirror symmetry)
  den = eps + beta[2]; den *= den; wtl[0] = (1./16.)/den;
  den = eps + beta[1]; den *= den; wtl[1] = (5./8. )/den;
  den = eps + beta[0]; den *= den; wtl[2] = (5./16.)/den;
  Wl = wtl[0] + wtl[1] + wtl[2];
  wl[0] = wtl[0]/Wl;
  wl[1] = wtl[1]/Wl;
  wl[2] = wtl[2]/Wl;

  *lout = vl[0]*wl[0] + vl[1]*wl[1] + vl[2]*wl[2];
  *rout = vr[0]*wr[0] + vr[1]*wr[1] + vr[2]*wr[2];
}

// MP5 reconstruction from PLUTO
// Imported by Mani Chandra
// Returns the smaller-magnitude argument when signs agree, else 0.
#define MINMOD(a, b) ((a)*(b) > 0.0 ? (fabs(a) < fabs(b) ? (a):(b)):0.0)

// Median of three values, expressed via minmod (PLUTO idiom).
inline double median(double a, double b, double c)
{
  return (a + MINMOD(b - a, c - a));
}

#define ALPHA (4.0)
#define EPSM  (1.e-12)

// One-sided MP5 face value; equation numbers refer to Suresh & Huynh 1997
// (as used in PLUTO).
inline double mp5_subcalc(double Fjm2, double Fjm1, double Fj, double Fjp1,
                          double Fjp2)
{
  double f, d2, d2p, d2m;
  double dMMm, dMMp;
  double scrh1,scrh2, Fmin, Fmax;
  double fAV, fMD, fLC, fUL, fMP;

  // 5th-order unlimited interpolant
  f = 2.0*Fjm2 - 13.0*Fjm1 + 47.0*Fj + 27.0*Fjp1 - 3.0*Fjp2;
  f /= 60.0;

  fMP = Fj + MINMOD(Fjp1 - Fj, ALPHA*(Fj - Fjm1));

  // Accept the unlimited value when it is monotonicity-preserving
  if ((f - Fj)*(f - fMP) <= EPSM)
    return f;

  d2m = Fjm2 + Fj   - 2.0*Fjm1;    // Eqn. 2.19
  d2  = Fjm1 + Fjp1 - 2.0*Fj;
  d2p = Fj   + Fjp2 - 2.0*Fjp1;    // Eqn. 2.19

  scrh1 = MINMOD(4.0*d2 - d2p, 4.0*d2p - d2);
  scrh2 = MINMOD(d2, d2p);
  dMMp  = MINMOD(scrh1,scrh2);     // Eqn. 2.27

  scrh1 = MINMOD(4.0*d2m - d2, 4.0*d2 - d2m);
  scrh2 = MINMOD(d2, d2m);
  dMMm  = MINMOD(scrh1,scrh2);     // Eqn. 2.27

  fUL = Fj + ALPHA*(Fj - Fjm1);                // Eqn. 2.8
  fAV = 0.5*(Fj + Fjp1);                       // Eqn. 2.16
  fMD = fAV - 0.5*dMMp;                        // Eqn. 2.28
  fLC = 0.5*(3.0*Fj - Fjm1) + 4.0/3.0*dMMm;    // Eqn. 2.29

  scrh1 = fmin(Fj, Fjp1);
  scrh1 = fmin(scrh1, fMD);
  scrh2 = fmin(Fj, fUL);
  scrh2 = fmin(scrh2, fLC);
  Fmin  = fmax(scrh1, scrh2);                  // Eqn. (2.24a)

  scrh1 = fmax(Fj, Fjp1);
  scrh1 = fmax(scrh1, fMD);
  scrh2 = fmax(Fj, fUL);
  scrh2 = fmax(scrh2, fLC);
  Fmax  = fmin(scrh1, scrh2);                  // Eqn. 2.24b

  // Clip the unlimited value into the allowed interval
  f = median(f, Fmin, Fmax);                   // Eqn. 2.26

  return f;
}

// Right face from the forward stencil, left face from the mirrored stencil.
inline void mp5(double x1, double x2, double x3, double x4, double x5,
                double *lout, double *rout)
{
  *rout = mp5_subcalc(x1, x2, x3, x4, x5);
  *lout = mp5_subcalc(x5, x4, x3, x2, x1);
}
#undef MINMOD

// Use the pre-processor for poor man's multiple dispatch.
// dir selects the stencil axis: 1 -> i (x1), 2 -> j (x2).
// PLOOP/JSLOOP/ISLOOP are presumably loop macros from decs.h -- the collapse(2)
// clause assumes the first two expand to canonical for-loops; verify there.
void reconstruct(struct FluidState *S, GridPrim Pl, GridPrim Pr, int dir)
{
  timer_start(TIMER_RECON);
  if (dir == 1)
#pragma omp parallel for collapse(2)
    PLOOP JSLOOP(-1, N2) ISLOOP(-1, N1)
      RECON_ALGO(S->P[ip][j][i-2], S->P[ip][j][i-1], S->P[ip][j][i],
                 S->P[ip][j][i+1], S->P[ip][j][i+2],
                 &(Pl[ip][j][i]), &(Pr[ip][j][i]));
  else if (dir == 2)
#pragma omp parallel for collapse(2)
    PLOOP JSLOOP(-1, N2) ISLOOP(-1, N1)
      RECON_ALGO(S->P[ip][j-2][i], S->P[ip][j-1][i], S->P[ip][j][i],
                 S->P[ip][j+1][i], S->P[ip][j+2][i],
                 &(Pl[ip][j][i]), &(Pr[ip][j][i]));
  timer_stop(TIMER_RECON);
}
grid_utils.h
#pragma once #include <mtao/types.hpp> #include <array> #include <tuple> #include <iterator> #ifdef _OPENMP #include <omp.h> #endif namespace mtao::geometry::grid::utils { namespace internal { constexpr int full_mask(int D) { return (1 << (D)) - 1; } template <int N, int M, int Mask, typename coord_type, typename Func, bool Reverse = false, bool Parallel = false> struct masked_multi_looper { constexpr static bool Masked(int idx) { return !bool(Mask & (1 << idx)); } static bool masked(int mask, int idx) { return !bool(mask & (1 << idx)); } constexpr static int MyN = Reverse?M-N-1:N; static void run(const coord_type& bounds, coord_type& idx, const Func& f, int mask = 0) { if constexpr(Mask == full_mask(M+1)) { if(masked(mask,N)) { masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f, mask); return; } } if constexpr(Masked(N)) { masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f, mask); } else if constexpr(false && M >= N+2 && !Masked(N+1)) { constexpr static int NN = N+1; constexpr static int MyNN = Reverse?M-NN-1:NN; if constexpr(Parallel) { #ifdef _OPENMP #pragma omp parallel for #endif for(auto& i = idx[MyN] = 0; i < bounds[MyN]; ++i) { for(auto& j = idx[MyNN] = 0; j < bounds[MyNN]; ++j) { masked_multi_looper<N+2,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f,mask); } } } else { for(auto& i = idx[MyN] = 0; i < bounds[MyN]; ++i) { for(auto& j = idx[MyNN] = 0; j < bounds[MyNN]; ++j) { masked_multi_looper<N+2,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f,mask); } } } idx[MyNN] = 0; } else if constexpr(Parallel) { #ifdef _OPENMP #pragma omp parallel for #endif for(auto&& i = idx[MyN] = 0; i < bounds[MyN]; ++i) { masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f,mask); } } else { for(auto&& i = idx[MyN] = 0; i < bounds[MyN]; ++i) { masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(bounds,idx,f,mask); } } idx[MyN] = 0; } static void run(const coord_type& begin, const coord_type& end, 
coord_type& idx, const Func& f, int mask = 0) { if constexpr(Mask == full_mask(M+1)) { if(masked(mask,N)) { masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask); return; } } if constexpr(Masked(N)) { masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f,mask); } else if constexpr(false && M >= N+2 && !Masked(N+1)) { constexpr static int NN = N+1; constexpr static int MyNN = Reverse?M-NN-1:NN; if constexpr(Parallel) { #ifdef _OPENMP #pragma omp parallel for #endif for(auto& i = idx[MyN] = begin[MyN]; i < end[MyN]; ++i) { for(auto& j = idx[MyNN] = begin[MyNN]; j < end[MyNN]; ++j) { masked_multi_looper<N+2,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask); } } } else { for(auto& i = idx[MyN] = begin[MyN]; i < end[MyN]; ++i) { for(auto& j = idx[MyNN] = begin[MyNN]; j < end[MyNN]; ++j) { masked_multi_looper<N+2,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask); } } } idx[MyNN] = begin[MyNN]; } else if constexpr(Parallel) { #ifdef _OPENMP #pragma omp parallel for #endif for(auto&& i = idx[MyN] = begin[MyN]; i < end[MyN]; ++i) { masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask); } } else { for(auto&& i = idx[MyN] = begin[MyN]; i < end[MyN]; ++i) { masked_multi_looper<N+1,M,Mask,coord_type,Func,Reverse>::run(begin,end,idx,f, mask); } } idx[MyN] = begin[MyN]; } }; template <int N, int Mask, typename coord_type, typename Func, bool Reverse, bool Parallel> struct masked_multi_looper<N,N, Mask,coord_type,Func,Reverse, Parallel> { static void run(const coord_type& bounds, coord_type& idx, const Func& f, int mask = 0) { f(idx); } static void run(const coord_type& begin, const coord_type& end, coord_type& idx, const Func& f, int mask = 0) { f(idx); } }; template <int N, int M, typename coord_type, typename Func, bool Reverse = false> using multi_looper = masked_multi_looper<N,M,((1<<M) - 1),coord_type, Func, Reverse, false>; #ifdef _OPENMP template <int N, int M, typename 
coord_type, typename Func, bool Reverse = false> using multi_looper_parallel = masked_multi_looper<N,M,((1<<M) - 1),coord_type, Func, Reverse, true>; #else template <int N, int M, typename coord_type, typename Func, bool Reverse = false> using multi_looper_parallel = masked_multi_looper<N,M,((1<<M) - 1),coord_type, Func, Reverse, true>; #endif } // Mask is an integer M such that every dimension d s.t (1 << d) & M == 0, the looper skips that dimension template <int Mask, typename coord_type, typename Func> void masked_multi_loop(const coord_type& index, const Func& f) { coord_type idx = {}; internal::masked_multi_looper<0,std::tuple_size<coord_type>::value,Mask,coord_type,Func,false>::run(index,idx,f); } template <typename coord_type, typename Func> void masked_multi_loop(const coord_type& index, const Func& f, int mask) { coord_type idx = {}; constexpr static int size = std::tuple_size<coord_type>::value; internal::masked_multi_looper<0,size,internal::full_mask(size+1),coord_type,Func,false>::run(index,idx,f, mask); } template <typename coord_type, typename Func> void multi_loop(const coord_type& index, const Func& f) { coord_type idx = {}; internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,false>::run(index,idx,f); } template <typename coord_type, typename Func> void multi_loop_parallel(const coord_type& index, const Func& f) { coord_type idx = {}; internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,false>::run(index,idx,f); } template <typename coord_type, typename Func> void right_multi_loop(const coord_type& index, const Func& f) {//Same above but does dimensions in reverse coord_type idx = {}; std::fill(idx.begin(),idx.end(),0); internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,true>::run(index,idx,f); } template <typename coord_type, typename Func> void multi_loop(const coord_type& begin, const coord_type& end, const Func& f) { coord_type idx(begin); 
internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,false>::run(begin,end,idx,f); } template <typename coord_type, typename Func> void multi_loop_parallel(const coord_type& begin, const coord_type& end, const Func& f) { coord_type idx(begin); internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,false>::run(begin,end,idx,f); } template <typename coord_type, typename Func> void right_multi_loop(const coord_type& begin, const coord_type& end, const Func& f) {//Same above but does dimensions in reverse coord_type idx(begin); internal::multi_looper<0,std::tuple_size<coord_type>::value,coord_type,Func,true>::run(begin,end,idx,f); } } namespace mtao { namespace interp_internal { template <typename T, typename U> T lerp(const T& a, const T& b, U alpha) {//alpha = 0 -> a, alpha = 1 -> b return (1-alpha) * a + alpha * b; } template <typename T, typename U> T bilerp(const T& a, const T& b, const T& c, const T& d, U alpha, U beta) { return lerp(lerp(a,b,alpha), lerp(c,d,alpha),beta);// 0,0 => a, 0,1 => b, 1,0 => c, 1,1 => d } template <typename T, typename U> T trilerp(const T& a, const T& b, const T& c, const T& d, const T& e, const T& f, const T& g, const T& h, U alpha, U beta, U gamma) { return lerp( bilerp(a,b,c,d,alpha,beta)// 0,0,0 => a, 0,1,0 => b, 1,0,0 => c, 1,1,0 => d , bilerp(e,f,g,h,alpha,beta)// 0,0,1 => e, 0,1,1 => f, 1,0,1 => g, 1,1,1 => h , gamma); } template <typename GridType, typename U> typename GridType::Scalar bilerp(const GridType& g, int i, int j, U alpha, U beta) { return bilerp(g(i,j),g(i+1,j ),g(i,j+1 ),g(i+1,j+1),alpha,beta); } template <typename GridType, typename U> typename GridType::Scalar trilerp(const GridType& g, int i, int j, int k, U alpha, U beta, U gamma) { return trilerp( g(i,j,k ),g(i+1,j ,k ),g(i,j+1 ,k ),g(i+1,j+1,k ) , g(i,j,k+1),g(i+1,j ,k+1),g(i,j+1 ,k+1),g(i+1,j+1,k+1) ,alpha,beta,gamma); } } template <typename T> void barycentric(T a, int ni, int* i, T* di ) { constexpr static T ome 
= 1 - std::numeric_limits<T>::epsilon(); a = std::min<T>(std::max<T>(a,0),ni); T v = a; T v2 = std::floor(v); *di = v-v2; *i = int(v2); if(*i < 0) { *di = 0; *i = 0; } else if(*i >= ni) { *i = ni-1; *di = ome; } } template <typename VecType, typename ShapeType,typename DiffType, int D = std::tuple_size<ShapeType>::value> void barycentric(const VecType& v, const ShapeType& shape, ShapeType& i, DiffType& di) { static_assert(std::tuple_size<DiffType>::value == D,""); for(size_t idx = 0; idx < D; ++idx) { barycentric(v(idx),shape[idx]-1,&i[idx],&di[idx]); } } template <typename T, typename GridType> typename GridType::Scalar lerp(const GridType& g, const std::array<int,GridType::D>& i, const std::array<T,GridType::D>& di); template <typename T, typename GridType> typename GridType::Scalar lerp(const GridType& g, const std::array<int,2>& i, const std::array<T,2>& di) { static_assert(GridType::D == 2,""); return mtao::interp_internal::bilerp(g,i[0],i[1],di[0],di[1]); } template <typename T, typename GridType> typename GridType::Scalar lerp(const GridType& g, const std::array<int,3>& i, const std::array<T,3>& di) { static_assert(GridType::D == 3,""); return mtao::interp_internal::trilerp(g,i[0],i[1],i[2],di[0],di[1],di[2]); } template <typename VecType, typename GridType> typename GridType::Scalar lerp(const GridType& g, const VecType& v) { constexpr int Dim = GridType::D;//mtao::Grid assumption //int i,j; //float di,dj; std::array<int,Dim> i; std::array<typename VecType::Scalar,Dim> di; barycentric(v,g.shape(),i,di); return lerp(g,i,di); } template <typename VecType, typename GridType> typename GridType::Scalar bilerp(const GridType& g, const VecType& v) { return lerp(g,v); } }
mandel.c
#ifdef _OPM #include <omp.h> #endif _OPM void mandel(int disp_width, int disp_height, int *array, int max_iter) { double scale_real, scale_imag; double x, y, u, v, u2, v2; int i, j, iter; scale_real = 3.5 / (double)disp_width; scale_imag = 3.5 / (double)disp_height; #pragma omp parallel shared(array,disp_width,disp_height,scale_real,scale_imag,max_iter) private(x, y, u, v, u2, v2, i, j, iter) { #pragma omp for for(i = 0; i < disp_width; i++) { x = ((double)i * scale_real) - 2.25; for(j = 0; j < disp_height; j++) { y = ((double)j * scale_imag) - 1.75; u = 0.0; v = 0.0; u2 = 0.0; v2 = 0.0; iter = 0; while ( u2 + v2 < 4.0 && iter < max_iter ) { v = 2 * v * u + y; u = u2 - v2 + x; u2 = u*u; v2 = v*v; iter = iter + 1; } // if we exceed max_iter, reset to zero iter = iter == max_iter ? 0 : iter; array[i*disp_height + j] = iter; } } } }
main.c
// C Compiler flag: -fopenmp #include <stdio.h> #include <omp.h> #include <stdlib.h> #define N 20 int main(int argc, char *argv[]) { omp_set_dynamic(0); // запретить библиотеке openmp менять число потоков во время исполнения //omp_set_num_threads(2); // установить число потоков в X int threadsCount = omp_get_max_threads(); printf("Threads count: %d\n", threadsCount); int d[6][8]; for (int i = 0; i < 6; i++) { for (int j = 0; j < 8; j++) { d[i][j] = rand(); } } //printf("before first section: a: %d ; b: %d\n",a, b); #pragma omp parallel sections num_threads(3) firstprivate(d) { #pragma omp section { int sum = 0; for (int i = 0; i < 6; i++) { for (int j = 0; j < 8; j++) { sum += d[i][j]; } } printf("in d avg: %d\n", sum / (6 * 8)); } #pragma omp section { int min = d[0][0]; int max = d[0][0]; for (int i = 0; i < 6; i++) { for (int j = 0; j < 8; j++) { if (min > d[i][j]) min = d[i][j]; if (max < d[i][j]) max = d[i][j]; } } printf("in d min: %d; max: %d\n", min, max); } #pragma omp section { int dividedOn3Count = 0; for (int i = 0; i < 6; i++) { for (int j = 0; j < 8; j++) { if (d[i][j] % 3 == 0) dividedOn3Count++; } } printf("in d mod 3 == 0 count: %d\n", dividedOn3Count); } } return 0; }
GB_unop__isnan_bool_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__isnan_bool_fc64)
// op(A') function: GB (_unop_tran__isnan_bool_fc64)

// C type:   bool
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = (aij)
// unaryop:  cij = GB_cisnan (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cisnan (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    GxB_FC64_t aij = Ax [pA] ;                  \
    /* Cx [pC] = op (cast (aij)) */             \
    GxB_FC64_t z = (aij) ;                      \
    Cx [pC] = GB_cisnan (z) ;                   \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise Cx [p] = GB_cisnan (Ax [p]) over anz entries, with the bitmap
// Ab (if non-NULL) selecting which entries are present.

GrB_Info GB (_unop_apply__isnan_bool_fc64)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = GB_cisnan (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = GB_cisnan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros above.

GrB_Info GB (_unop_tran__isnan_bool_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__cimag_fp32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated kernel for C = cimag(A) with C:float, A:GxB_FC32_t.
// Code left byte-identical; comments only.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__cimag_fp32_fc32
// op(A') function:  GB_unop_tran__cimag_fp32_fc32

// C type:   float
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = (aij)
// unaryop:  cij = cimagf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cimagf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = (aij) ; \
    Cx [pC] = cimagf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CIMAG || GxB_NO_FP32 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__cimag_fp32_fc32
(
    float *Cx,                      // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every entry of A is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = cimagf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not in the bitmap
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = cimagf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__cimag_fp32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose template provides the loop body via the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
1632.c
/* * Compile using the command: * `cc 27Stencil.c -o oa -fopenmp -lm` */ #include <math.h> #include <omp.h> #include <stdint.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #ifdef _OPENACC #include <openacc.h> #endif #define DEFAULT_DATASIZE 1048576 /* Default datasize. */ #define DEFAULT_REPS 10 /* Default repetitions. */ #define CONF95 1.96 #define ITERATIONS 10 #define FAC (1./26) #define TOLERANCE 1.0e-15 extern int reps; /* Repetitions. */ extern double *times; /* Array to store results in. */ extern int flag; /* Flag to set CPU or GPU invocation. */ extern unsigned int datasize; /* Datasize passed to benchmark functions. */ unsigned int datasize = -1; /* Datasize for tests in bytes. */ int reps = -1; /* Repetitions. */ double *times; /* Array of doubles storing the benchmark times in microseconds. */ double testtime; /* The average test time in microseconds for reps runs. */ double testsd; /* The standard deviation in the test time in microseconds for reps runs. */ int flag = 0; /* 0 indicates CPU. */ /* * Function prototypes for common functions. */ void init(int argc, char **argv); void finalisetest(char *); void finalise(void); void benchmark(char *, double (*test)(void)); void print_results(char *, double, double); /* Forward Declarations of utility functions*/ double max_diff(double *, double *, int); void wul(); void usage(char *argv[]) { printf("Usage: %s \n" "\t--reps <repetitions> (default %d)\n" "\t--datasize <datasize> (default %d bytes)\n", argv[0], DEFAULT_REPS, DEFAULT_DATASIZE); } /* * This function parses the parameters from the command line. 
*/ void parse_args(int argc, char *argv[]) { int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--reps") == 0) { reps = atoi(argv[++arg]); if (reps == 0) { printf("Invalid integer:--reps: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--datasize") == 0) { datasize = atoi(argv[++arg]); if (datasize == 0) { printf("Invalid integer:--datasize: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd; int i, good_reps; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; good_reps = 0; for (i = 0; i < reps; i++) { /* Skip entries where times is 0, this indicates an error occured */ if (times[i] != 0){ mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; good_reps++; } } meantime = totaltime / good_reps; sumsq = 0; for (i = 0; i < reps; i++) { if (times[i] != 0){ sumsq += (times[i] - meantime) * (times[i] - meantime); } } sd = sqrt(sumsq / good_reps); *mtp = meantime; *sdp = sd; } /* * This function prints the results of the tests. * If you use a compiler which sets a different preprocessor flag * you may wish to add it here. */ void print_results(char *name, double testtime, double testsd) { char compiler[20]; /* Set default compiler idetifier. */ sprintf(compiler, "COMPILER"); /* Set compiler identifier based on known preprocessor flags. */ #ifdef __PGI sprintf(compiler, "PGI"); #endif #ifdef __HMPP sprintf(compiler, "CAPS"); #endif //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6); printf("%f\n", testtime*1e6); } /* * This function initialises the storage for the test results and set the defaults. 
*/
/* Parse arguments, apply defaults, and allocate the times[] results array. */
void init(int argc, char **argv) {
  parse_args(argc, argv);

  if (reps == -1) {
    reps = DEFAULT_REPS;
  }
  if (datasize == (unsigned int)-1) {
    datasize = DEFAULT_DATASIZE;
  }
  /* NOTE(review): malloc result is not checked here — TODO confirm intended. */
  times = (double *)malloc((reps) * sizeof(double));
/*
#ifdef __PGI
  acc_init(acc_device_nvidia);
  //  printf("PGI INIT\n");
#endif
#ifdef __HMPP
  int a[5] = {1,2,3,4,5};
#pragma acc data copyin(a[0:5])
  {}
#endif
#ifdef _CRAYC
  int a[5] = {1,2,3,4,5};
#pragma acc data copyin(a[0:5])
  {}
#endif
*/
}

/* Release the times[] results array allocated by init(). */
void finalise(void) {
  free(times);
}

/*
 * This function runs the benchmark specified.
 * test() returns the elapsed time, or the sentinel -10000 (allocation
 * failure) / -11000 (CPU-GPU mismatch); failures are recorded as time 0.
 */
void benchmark(char *name, double (*test)(void))
{
  int i = 0;
  double tmp = 0;

  for (i=0; i<reps; i++) {
    tmp = test();
    /* Sentinel comparisons are exact because test() returns these constants
       verbatim, so floating-point equality is safe here. */
    if (tmp == -10000){
      printf("Memory allocation failure in %s\n", name);
      times[i] = 0;
    }
    else if (tmp == -11000){
      printf("CPU/GPU mismatch in %s\n", name);
      times[i] = 0;
    }
    else{
      times[i] = tmp;
    }
  }

  stats(&testtime, &testsd);
  //printf("in benchmark\n");
  print_results(name, testtime, testsd);
  //printf("printed result\n");
}

/*
 * 27-point stencil benchmark.  Runs ITERATIONS sweeps of the stencil on the
 * host, then repeats the same computation inside an OpenACC data region and
 * times it, and finally compares the two results with max_diff().
 * Returns the device elapsed time in seconds, -10000 on allocation failure,
 * or -11000 if host and device results differ by more than TOLERANCE.
 */
double stencil()
{
  extern unsigned int datasize;
  /* Cube edge length chosen so two sz^3 double arrays fit in datasize bytes. */
  int sz = cbrt((datasize/sizeof(double))/2);
  int i, j, k, iter;
  int n = sz-2;
  double fac = FAC;
  double t1, t2;
  double md;
  //printf("size = %d\n", sz);

  /* Work buffers, with halos */
  double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);

  if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
    /* Something went wrong in the memory allocation here, fail gracefully */
    /* NOTE(review): any buffers that WERE allocated are leaked on this path
       — TODO confirm acceptable for a benchmark. */
    return(-10000);
  }

  /* initialize input array a0 */

  /* zero all of array (including halos) */
  //printf("size = %d\n", sz);
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i*sz*sz+j*sz+k] = 0.0;
        //printf("%d\t", (i*sz*sz+j*sz+k));
      }
    }
  }
  //printf("\n");
  //int size_of_a0 = sizeof(a0) / sizeof(*a0);
  //printf("size of a0 = %d\n", size_of_a0);

  /* use random numbers to fill interior */
  for (i = 1; i < n+1; i++) {
    for (j = 1; j < n+1; j++) {
      for (k = 1; k < n+1; k++) {
        a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
      }
    }
  }

  /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
  /* save initial input array for later GPU run */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
      }
    }
  }

  //printf("Host computation\n");
  /* run main computation on host */
  for (iter = 0; iter < ITERATIONS; iter++) {

    for (i = 1; i < n+1; i++) {
      for (j = 1; j < n+1; j++) {
        for (k = 1; k < n+1; k++) {
          /* average of the 26 face/edge/corner neighbours (FAC = 1/26) */
          a1[i*sz*sz+j*sz+k] = (
            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
          ) * fac;
        }
      }
    }

    for (i = 1; i < n+1; i++) {
      for (j = 1; j < n+1; j++) {
        for (k = 1; k < n+1; k++) {
          a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
        }
      }
    }

  } /* end iteration loop */

  /* save result */
  /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
        // printf("%lf\t", a0[i*sz*sz+j*sz+k]);
      }
    }
  }
  //int size = sizeof(host_result)/sizeof(host_result[0]);
  //for(i = 0; i < size; i++) {
  //  printf("%lf\t", host_result[i]);
  //}
  //printf("\n");

  /* copy initial array back to a0 */
  /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
      }
    }
  }

  //printf("Starting acc pragma code\n");
  t1 = omp_get_wtime();

  /* NOTE(review): the first loop nest below carries only `#pragma omp simd`
     (no `acc parallel loop`), so inside the acc data region it appears to run
     on the host — TODO confirm this mix of OpenMP/OpenACC is intentional. */
#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
  {
    for (iter = 0; iter < ITERATIONS; iter++) {

      for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
          #pragma omp simd
          for (k = 1; k < n+1; k++) {
            a1[i*sz*sz+j*sz+k] = (
              a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
              a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
              a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
              a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
              a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
              a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
              a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
              a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
              a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
              a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
              a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
              a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
              a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
            ) * fac;
          }
        }
      }

      #pragma acc parallel loop
      for (i = 1; i < n+1; i++) {
        #pragma acc loop
        for (j = 1; j < n+1; j++) {
          #pragma acc loop
          for (k = 1; k < n+1; k++) {
            a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
          }
        }
      }

    } /* end iteration loop */
  } /* end data region */
#pragma acc wait
  t2 = omp_get_wtime();

  memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);

  md = max_diff(&host_result[0],&device_result[0], sz);

  /* Free malloc'd memory to prevent leaks */
  free(a0);
  free(a0_init);
  free(a1);
  free(host_result);
  free(device_result);
  //printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
  if (md < TOLERANCE ){
    //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
    return(t2 - t1);
  }
  else{
    //  printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
    return(-11000);
  }
}

/* Utility Functions */

/* Maximum absolute element-wise difference over the interior (halo excluded). */
double max_diff(double *array1,double *array2, int sz)
{
  double tmpdiff, diff;
  int i,j,k;
  int n = sz-2;
  diff=0.0;

  for (i = 1; i < n+1; i++) {
    for (j = 1; j < n+1; j++) {
      for (k = 1; k < n+1; k++) {
        tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
        //printf("diff: %lf", tmpdiff);
        if (tmpdiff > diff) diff = tmpdiff;
      }
    }
  }
  return diff;
}

/*
 * This function ensures the device is awake.
 * It is more portable than acc_init().
 */
void wul(){
  int data = 8192;
  double *arr_a = (double *)malloc(sizeof(double) * data);
  double *arr_b = (double *)malloc(sizeof(double) * data);
  int i = 0;

  if (arr_a==NULL||arr_b==NULL) {
    printf("Unable to allocate memory in wul.\n");
  }

  for (i=0;i<data;i++){
    arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
  }

#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
  {
#pragma acc parallel loop
    for (i=0;i<data;i++){
      arr_b[i] = arr_a[i] * 2;
    }
  }

  if (arr_a[0] < 0){
    printf("Error in WUL\n");
    /*
     * This should never be called as rands should be in the range (0,1].
     * This stops clever optimizers.
     */
  }
  free(arr_a);
  free(arr_b);
}

/* Driver: initialise, wake the device, run the 27-point stencil benchmark. */
int main(int argc, char **argv) {
  char testName[32];
  //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n");

  /* Initialise storage for test results & parse input arguements. */
  init(argc, argv);

  /* Ensure device is awake. */
  wul();

  sprintf(testName, "27S");
  benchmark(testName, &stencil);

  /* Print results & free results storage */
  finalise();

  return EXIT_SUCCESS;
}
amask_omp.c
/* Program omp_affinity reports the mask for each OMP thread, and works for nsec seconds (10). This allows one to inspect occupation through utilities like top (e.g. execute top, then hit the 1 key). Uses maskeraid utilities github.com/TACC/maskeraid map_to_procid(cpu_id): will set current thread to cpu_id amask_omp(): reports masks of the threads load_cpu_nsec(nsec): load the cpu for nsec (default 10) */ /* omp_affinity.c is a driver 1.) Get line arguments (optional): help or number of seconds for load 2.) Start OpenMP parallel region amask_omp() reports masks for each thread 3.) Set a work load on each thread 4.) Finish parallel region Kent Milfeld 12/16/15 Added cmd_line argument extraction. Kent Milfeld 2016/07/13 */ #include <stdio.h> #include <omp.h> #include "opts.h" void load_cpu_nsec(int nsec); void amask_omp(); int map_to_procid( int icore); int main(int argc, char *argv[]){ int nthrds, thrd, procid; //Thread info int nsec = 10; // Load, default time int ierr; // Error number // cmdln_get_nsec_or_help( &nsec, argc, argv); //optional, get nsec from cmd line Maskopts opts(argc,argv); #pragma omp parallel private(thrd,ierr) { thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); // procid = thrd; // set procid to thread number (thrd) // ierr = map_to_procid( procid ); // set your own affinity here amask_omp(); // Call mask reporter load_cpu_nsec( nsec ); // Load up rank process so user can watch top. } }
map-4.c
/* PR c/96678. */ #define SIZE (100) typedef double Grid[SIZE]; void test (Grid src1) { #pragma omp target map(alloc:src1[:]) /* { dg-error "for array function parameter length expression must be specified" } */ { src1[0] = 5; } } void test2 (double src2[]) { #pragma omp target map(alloc:src2[:]) /* { dg-error "for array function parameter length expression must be specified" } */ { src2[0] = 5; } } void test3 (double *src3) { #pragma omp target map(alloc:src3[:]) /* { dg-error "for pointer type length expression must be specified" } */ { src3[0] = 5; } }
GB_subassign_05.c
//------------------------------------------------------------------------------
// GB_subassign_05: C(I,J)<M> = scalar ; no S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 05: C(I,J)<M> = scalar ; no S

// M:           present
// Mask_comp:   false
// C_replace:   false
// accum:       NULL
// A:           scalar
// S:           none

// C: not bitmap
// M: any sparsity

// NOTE(review): control flow here is defined by GB_subassign_methods.h macros
// (task descriptors, binary search, zombie/pending bookkeeping); code left
// byte-identical, comments only.

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_05
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_MATRIX_WAIT_IF_JUMBLED (C) ;
    GB_MATRIX_WAIT_IF_JUMBLED (M) ;

    GB_GET_C ;      // C must not be bitmap
    int64_t zorig = C->nzombies ;
    const int64_t *GB_RESTRICT Ch = C->h ;
    const int64_t *GB_RESTRICT Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_MASK ;
    GB_GET_SCALAR ;
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 05: C(I,J)<M> = scalar ; no S
    //--------------------------------------------------------------------------

    // Time: Close to Optimal:  the method must iterate over all entries in M,
    // so the time is Omega(nnz(M)).  For each entry M(i,j)=1, the
    // corresponding entry in C must be found and updated (inserted or
    // modified).  This method does this with a binary search of C(:,jC) or a
    // direct lookup if C(:,jC) is dense.  The time is thus O(nnz(M)*log(n)) in
    // the worst case, usually less than that since C(:,jC) often has O(1)
    // entries.  An additional time of O(|J|*log(Cnvec)) is added if C is
    // hypersparse.  There is no equivalent method that computes
    // C(I,J)<M>=scalar using the matrix S.

    // Method 05 and Method 07 are very similar.  Also compare with Method 06n.

    //--------------------------------------------------------------------------
    // Parallel: slice M into coarse/fine tasks (Method 05, 06n, 07)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_ONE_SLICE (M) ;    // M cannot be jumbled

    //--------------------------------------------------------------------------
    // phase 1: undelete zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of M
            //------------------------------------------------------------------

            int64_t j = GBH (Mh, k) ;
            GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ;
            int64_t mjnz = pM_end - pM ;
            if (mjnz == 0) continue ;   // nothing masked in this vector

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;
            int64_t cjnz = pC_end - pC_start ;
            bool cjdense = (cjnz == Cvlen) ;

            //------------------------------------------------------------------
            // C(I,jC)<M(:,j)> = scalar ; no S
            //------------------------------------------------------------------

            if (cjdense)
            {

                //--------------------------------------------------------------
                // C(:,jC) is dense so the binary search of C is not needed
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;
                        GB_iC_DENSE_LOOKUP ;

                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =A ): copy A into C, no accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_noaccum_C_A_1_scalar ;
                    }
                }
            }
            else
            {

                //--------------------------------------------------------------
                // C(:,jC) is sparse; use binary search for C
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;

                        // find C(iC,jC) in C(:,jC)
                        GB_iC_BINARY_SEARCH ;
                        if (cij_found)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): copy A into C, no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // entry not present: counted now, inserted phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;
    zorig = C->nzombies ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of M
            //------------------------------------------------------------------

            int64_t j = GBH (Mh, k) ;
            GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ;
            int64_t mjnz = pM_end - pM ;
            if (mjnz == 0) continue ;

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;
            bool cjdense = ((pC_end - pC_start) == Cvlen) ;

            //------------------------------------------------------------------
            // C(I,jC)<M(:,j)> = scalar ; no S
            //------------------------------------------------------------------

            if (!cjdense)
            {

                //--------------------------------------------------------------
                // C(:,jC) is sparse; use binary search for C
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;

                        // find C(iC,jC) in C(:,jC)
                        GB_iC_BINARY_SEARCH ;
                        if (!cij_found)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
matrixmultiply-ompacc.c
/* Naive matrix-matrix multiplication(mmm) By C. Liao */ #include <stdio.h> #include <assert.h> #ifdef _OPENMP #include <omp.h> #endif #define N 1024 #define M 1024 #define K 1024 #define REAL float int i,j,k; REAL a[N][M],b[M][K],c[N][K], c2[N][K]; int init(); int mmm(); int mmm2(); int verify(); int main(void) { init(); mmm(); mmm2(); return verify(); } int init() { for (i=0;i<N;i++) for(j=0;j<M;j++) a[i][j]=3.0*i*j/N/M; for (i=0;i<M;i++) for(j=0;j<K;j++) b[i][j]=5.0*j*i/N/M; for (i=0;i<N;i++) for(j=0;j<K;j++) { c[i][j]=0.0; c2[i][j]=0.0; } return 0; } /* TODO: try different i,j,k orders a b e f a*e+ b*g , a*f+ b*h c d x g h = c*e+ d*g, c*f+ d*h */ int mmm() { //For static arrays with known dimension info. , no array section info. is needed //#pragma omp target map(tofrom:c[0:N][0:M]), map(to:a[0:N][0:M],b[0:M][0:K]) #pragma omp target map(tofrom:c), map(to:a,b) #pragma omp parallel for private(i,j,k) for (i = 0; i < N; i++) for (j = 0; j < M; j++) for (k = 0; k < K; k++) c[i][j]= c[i][j]+a[i][k]*b[k][j]; return 0; } int mmm2() { for (i = 0; i < N; i++) for (j = 0; j < M; j++) for (k = 0; k < K; k++) c2[i][j]= c2[i][j]+a[i][k]*b[k][j]; return 0; } int verify() { REAL sum=0.0, sum2=0.0; for (i=0;i<N;i++) for(j=0;j<K;j++) { sum+=c[i][j]; sum2+=c2[i][j]; } printf("sum of c[i][j] is %f\n",sum); printf("sum of c2[i][j] is %f\n",sum2); assert (sum == sum2); return 0; }
GB_convert_bitmap_worker.c
//------------------------------------------------------------------------------
// GB_convert_bitmap_worker: construct triplets or CSC/CSR from bitmap
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If A is iso and Ax_new is not NULL, the iso scalar is expanded into the
// non-iso array Ax_new.  Otherwise, if Ax_new and Ax are NULL then no values
// are extracted.

// TODO allow this function to do typecasting.  Create 169 different versions
// for all 13x13 versions.  Use this as part of Method 24, C=A assignment.
// Can also use typecasting for GB_Matrix_diag.

// Two-phase algorithm: phase 1 counts the entries of each vector of the
// bitmap A into Ap; after a cumulative sum, phase 2 gathers the pattern
// (Ai/Aj) and values (Ax_new) into their final positions.  Each phase picks
// one of two parallel schedules: one thread per vector when there are at
// least as many vectors as threads, otherwise threads split the rows and a
// workspace W of per-task counts/offsets is used.

#include "GB.h"
#include "GB_partition.h"

GrB_Info GB_convert_bitmap_worker   // extract CSC/CSR or triplets from bitmap
(
    // outputs:
    int64_t *restrict Ap,           // vector pointers for CSC/CSR form
    int64_t *restrict Ai,           // indices for CSC/CSR or triplet form
    int64_t *restrict Aj,           // vector indices for triplet form
    GB_void *restrict Ax_new,       // values for CSC/CSR or triplet form
    int64_t *anvec_nonempty,        // # of non-empty vectors
    // inputs: not modified
    const GrB_Matrix A,             // matrix to extract; not modified
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (GB_IS_BITMAP (A)) ;
    ASSERT (Ap != NULL) ;           // must be provided on input, size avdim+1

    int64_t *restrict W = NULL ;    // per-task counts, allocated only if needed
    size_t W_size = 0 ;
    const int64_t avdim = A->vdim ;
    const int64_t avlen = A->vlen ;
    const size_t asize = A->type->size ;

    //--------------------------------------------------------------------------
    // count the entries in each vector
    //--------------------------------------------------------------------------

    const int8_t *restrict Ab = A->b ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (avlen*avdim, chunk, nthreads_max) ;
    // one thread per vector is possible only if there are enough vectors
    bool by_vector = (nthreads <= avdim) ;

    if (by_vector)
    {

        //----------------------------------------------------------------------
        // compute all vectors in parallel (no workspace)
        //----------------------------------------------------------------------

        int64_t j ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (j = 0 ; j < avdim ; j++)
        {
            // ajnz = nnz (A (:,j))
            int64_t ajnz = 0 ;
            int64_t pA_start = j * avlen ;
            for (int64_t i = 0 ; i < avlen ; i++)
            {
                // see if A(i,j) is present in the bitmap
                int64_t p = i + pA_start ;
                ajnz += Ab [p] ;        // Ab is 0/1, so this counts entries
                ASSERT (Ab [p] == 0 || Ab [p] == 1) ;
            }
            Ap [j] = ajnz ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // compute blocks of rows in parallel
        //----------------------------------------------------------------------

        // allocate one row of W per thread, each row of length avdim
        W = GB_MALLOC_WERK (nthreads * avdim, int64_t, &W_size) ;
        if (W == NULL)
        {
            // out of memory
            return (GrB_OUT_OF_MEMORY) ;
        }

        // each task counts the entries of its row block, for every vector j
        int taskid ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (taskid = 0 ; taskid < nthreads ; taskid++)
        {
            int64_t *restrict Wtask = W + taskid * avdim ;
            int64_t istart, iend ;
            GB_PARTITION (istart, iend, avlen, taskid, nthreads) ;
            for (int64_t j = 0 ; j < avdim ; j++)
            {
                // ajnz = nnz (A (istart:iend-1,j))
                int64_t ajnz = 0 ;
                int64_t pA_start = j * avlen ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    // see if A(i,j) is present in the bitmap
                    int64_t p = i + pA_start ;
                    ajnz += Ab [p] ;
                    ASSERT (Ab [p] == 0 || Ab [p] == 1) ;
                }
                Wtask [j] = ajnz ;
            }
        }

        // cumulative sum to compute nnz(A(:,j)) for each vector j.
        // This also converts Wtask [j] from a count into the offset of
        // task taskid within vector j, which phase 2 below relies on.
        int64_t j ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (j = 0 ; j < avdim ; j++)
        {
            int64_t ajnz = 0 ;
            for (int taskid = 0 ; taskid < nthreads ; taskid++)
            {
                int64_t *restrict Wtask = W + taskid * avdim ;
                int64_t c = Wtask [j] ;
                Wtask [j] = ajnz ;
                ajnz += c ;
            }
            Ap [j] = ajnz ;
        }
    }

    //--------------------------------------------------------------------------
    // cumulative sum of Ap
    //--------------------------------------------------------------------------

    int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
    GB_cumsum (Ap, avdim, anvec_nonempty, nth, Context) ;
    int64_t anz = Ap [avdim] ;
    ASSERT (anz == A->nvals) ;

    //--------------------------------------------------------------------------
    // gather the pattern and values from the bitmap
    //--------------------------------------------------------------------------

    // TODO: add type-specific versions for built-in types

    const GB_void *restrict Ax = (GB_void *) (A->x) ;
    const bool A_iso = A->iso ;
    const bool numeric = (Ax_new != NULL && Ax != NULL) ;

    if (by_vector)
    {

        //----------------------------------------------------------------------
        // construct all vectors in parallel (no workspace)
        //----------------------------------------------------------------------

        int64_t j ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (j = 0 ; j < avdim ; j++)
        {
            // gather from the bitmap into the new A (:,j)
            int64_t pnew = Ap [j] ;
            int64_t pA_start = j * avlen ;
            for (int64_t i = 0 ; i < avlen ; i++)
            {
                int64_t p = i + pA_start ;
                if (Ab [p])
                {
                    // A(i,j) is in the bitmap
                    if (Ai != NULL) Ai [pnew] = i ;
                    if (Aj != NULL) Aj [pnew] = j ;
                    if (numeric)
                    {
                        // Ax_new [pnew] = Ax [p]) ; an iso matrix reads its
                        // single scalar at Ax [0] instead
                        memcpy (Ax_new +(pnew)*asize,
                            Ax +(A_iso ? 0:(p)*asize), asize) ;
                    }
                    pnew++ ;
                }
            }
            ASSERT (pnew == Ap [j+1]) ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // compute blocks of rows in parallel
        //----------------------------------------------------------------------

        int taskid ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (taskid = 0 ; taskid < nthreads ; taskid++)
        {
            // Wtask [j] holds this task's offset within vector j (computed
            // by the per-vector cumulative sum in phase 1 above)
            int64_t *restrict Wtask = W + taskid * avdim ;
            int64_t istart, iend ;
            GB_PARTITION (istart, iend, avlen, taskid, nthreads) ;
            for (int64_t j = 0 ; j < avdim ; j++)
            {
                // gather from the bitmap into the new A (:,j)
                int64_t pnew = Ap [j] + Wtask [j] ;
                int64_t pA_start = j * avlen ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    // see if A(i,j) is present in the bitmap
                    int64_t p = i + pA_start ;
                    if (Ab [p])
                    {
                        // A(i,j) is in the bitmap
                        if (Ai != NULL) Ai [pnew] = i ;
                        if (Aj != NULL) Aj [pnew] = j ;
                        if (numeric)
                        {
                            // Ax_new [pnew] = Ax [p] ;
                            memcpy (Ax_new +(pnew)*asize,
                                Ax +(A_iso ? 0:(p)*asize), asize) ;
                        }
                        pnew++ ;
                    }
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // free workspace return result
    //--------------------------------------------------------------------------

    GB_FREE_WERK (&W, W_size) ;
    return (GrB_SUCCESS) ;
}
colormap.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR M M AAA PPPP % % C O O L O O R R MM MM A A P P % % C O O L O O RRRR M M M AAAAA PPPP % % C O O L O O R R M M A A P % % CCCC OOO LLLLL OOO R R M M A A P % % % % % % MagickCore Colormap Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % We use linked-lists because splay-trees do not currently support duplicate % key / value pairs (.e.g X11 green compliance and SVG green compliance). % */ /* Include declarations. 
*/

#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e I m a g e C o l o r m a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImageColormap() allocates an image colormap and initializes
%  it to a linear gray colorspace.  If the image already has a colormap,
%  it is replaced.  AcquireImageColormap() returns MagickTrue if successful,
%  otherwise MagickFalse if there is not enough memory.
%
%  The format of the AcquireImageColormap method is:
%
%      MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colors: the number of colors in the image colormap.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  register ssize_t
    i;

  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* at least one entry is always allocated, even if colors == 0 */
  image->colors=MagickMax(colors,1);
  /* allocate or grow in place; +1 entry of slop matches historical usage */
  if (image->colormap == (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    {
      /* allocation failed: drop back to DirectClass with no colormap */
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /* initialize to a linear gray ramp, fully opaque */
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    double
      pixel;

    GetPixelInfo(image,image->colormap+i);
    pixel=(double) (i*(QuantumRange/MagickMax(colors-1,1)));
    image->colormap[i].red=pixel;
    image->colormap[i].green=pixel;
    image->colormap[i].blue=pixel;
    image->colormap[i].alpha=OpaqueAlpha;
    image->colormap[i].alpha_trait=BlendPixelTrait;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C y c l e C o l o r m a p I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CycleColormap() displaces an image's colormap by a given number of
%  positions.  If you cycle the colormap a number of times you can produce
%  a psychodelic effect.
%
%  WARNING: this assumes an images colormap is in a well know and defined
%  order. Currently Imagemagick has no way of setting that order.
%
%  The format of the CycleColormapImage method is:
%
%      MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o displace: displace the colormap this amount.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* a DirectClass image must first be converted to a palette image */
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType,exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    ssize_t
      index;

    /* rows are independent; a failed row only marks status, it cannot
       break out of the OpenMP loop */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* rotate the index by displace, wrapping modulo the colormap size;
         the extra += handles C's negative remainder for negative displace */
      index=(ssize_t) (GetPixelIndex(image,q)+displace) % image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(image,(Quantum) index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S o r t C o l o r m a p B y I n t e n s i t y                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SortColormapByIntensity() sorts the colormap of a PseudoClass image by
%  decreasing color intensity.
%
%  The format of the SortColormapByIntensity method is:
%
%      MagickBooleanType SortColormapByIntensity(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: A pointer to an Image structure.
% % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const PixelInfo *color_1, *color_2; int intensity; color_1=(const PixelInfo *) x; color_2=(const PixelInfo *) y; intensity=(int) GetPixelInfoIntensity((const Image *) NULL,color_2)-(int) GetPixelInfoIntensity((const Image *) NULL,color_1); return(intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport MagickBooleanType SortColormapByIntensity(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; ssize_t y; unsigned short *pixels; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->storage_class != PseudoClass) return(MagickTrue); /* Allocate memory for pixel indexes. */ pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors, sizeof(*pixels)); if (pixels == (unsigned short *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Assign index values to colormap entries. */ for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; /* Sort image colormap by decreasing color popularity. */ qsort((void *) image->colormap,(size_t) image->colors, sizeof(*image->colormap),IntensityCompare); /* Update image colormap indexes to sorted colormap order. 
*/ for (i=0; i < (ssize_t) image->colors; i++) pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum index; register ssize_t x; register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)]; SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (status == MagickFalse) break; } image_view=DestroyCacheView(image_view); pixels=(unsigned short *) RelinquishMagickMemory(pixels); return(status); }
matrix.h
/***************************************************************************
 *  include/stxxl/bits/containers/matrix.h
 *
 *  Part of the STXXL. See http://stxxl.sourceforge.net
 *
 *  Copyright (C) 2010-2011 Raoul Steffen <R-Steffen@gmx.de>
 *
 *  Distributed under the Boost Software License, Version 1.0.
 *  (See accompanying file LICENSE_1_0.txt or copy at
 *  http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/

#ifndef STXXL_CONTAINERS_MATRIX_HEADER
#define STXXL_CONTAINERS_MATRIX_HEADER

#include <stxxl/bits/containers/vector.h>
#include <stxxl/bits/common/counting_ptr.h>
#include <stxxl/bits/mng/block_scheduler.h>
#include <stxxl/bits/containers/matrix_arithmetic.h>

STXXL_BEGIN_NAMESPACE

//! \defgroup matrix matrix
//! Efficient external memory matrix operations
//! \ingroup stlcont
//! \{

/* index-variable naming convention:
 * [MODIFIER_][UNIT_]DIMENSION[_in_[MODIFIER_]ENVIRONMENT]
 *
 * e.g.:
 * block_row = number of row measured in rows consisting of blocks
 * element_row_in_block = number of row measured in rows consisting of elements in the (row of) block(s)
 *
 * size-variable naming convention:
 * [MODIFIER_][ENVIRONMENT_]DIMENSION[_in_UNITs]
 *
 * e.g.
 * height_in_blocks
 */

// forward declaration
template <typename ValueType, unsigned BlockSideLength>
class matrix;

//! external column-vector container for matrix multiplication
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
template <typename ValueType>
class column_vector : public vector<ValueType>
{
public:
    typedef vector<ValueType> vector_type;
    typedef typename vector_type::size_type size_type;

    using vector_type::size;

    //! \param n number of elements
    column_vector(size_type n = 0)
        : vector_type(n) { }

    // NOTE: the element-wise operators below are sequential O(n) scans over
    // the external vector(s); the binary operators materialize a new vector.

    column_vector operator + (const column_vector& right) const
    {
        assert(size() == right.size());
        column_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] + right[i];
        return res;
    }

    column_vector operator - (const column_vector& right) const
    {
        assert(size() == right.size());
        column_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] - right[i];
        return res;
    }

    column_vector operator * (const ValueType scalar) const
    {
        column_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] * scalar;
        return res;
    }

    column_vector& operator += (const column_vector& right)
    {
        assert(size() == right.size());
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] += right[i];
        return *this;
    }

    column_vector& operator -= (const column_vector& right)
    {
        assert(size() == right.size());
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] -= right[i];
        return *this;
    }

    column_vector& operator *= (const ValueType scalar)
    {
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] *= scalar;
        return *this;
    }

    //! set all elements to zero
    void set_zero()
    {
        for (typename vector_type::iterator it = vector_type::begin(); it != vector_type::end(); ++it)
            *it = 0;
    }
};

//! external row-vector container for matrix multiplication
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
template <typename ValueType>
class row_vector : public vector<ValueType>
{
public:
    typedef vector<ValueType> vector_type;
    typedef typename vector_type::size_type size_type;

    using vector_type::size;

    //! \param n number of elements
    row_vector(size_type n = 0)
        : vector_type(n) { }

    row_vector operator + (const row_vector& right) const
    {
        assert(size() == right.size());
        row_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] + right[i];
        return res;
    }

    row_vector operator - (const row_vector& right) const
    {
        assert(size() == right.size());
        row_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] - right[i];
        return res;
    }

    row_vector operator * (const ValueType scalar) const
    {
        row_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] * scalar;
        return res;
    }

    //! vector-matrix product; delegated to the matrix implementation
    template <unsigned BlockSideLength>
    row_vector operator * (const matrix<ValueType, BlockSideLength>& right) const
    { return right.multiply_from_left(*this); }

    //! dot product of a row vector with a column vector
    ValueType operator * (const column_vector<ValueType>& right) const
    {
        ValueType res = 0;
        for (size_type i = 0; i < size(); ++i)
            res += (*this)[i] * right[i];
        return res;
    }

    row_vector& operator += (const row_vector& right)
    {
        assert(size() == right.size());
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] += right[i];
        return *this;
    }

    row_vector& operator -= (const row_vector& right)
    {
        assert(size() == right.size());
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] -= right[i];
        return *this;
    }

    row_vector& operator *= (const ValueType scalar)
    {
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] *= scalar;
        return *this;
    }

    //! set all elements to zero
    void set_zero()
    {
        for (typename vector_type::iterator it = vector_type::begin(); it != vector_type::end(); ++it)
            *it = 0;
    }
};

//! Specialized swappable_block that interprets uninitialized as containing zeros.
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
//!
//! When initializing, all values are set to zero.
template <typename ValueType, unsigned BlockSideLength>
class matrix_swappable_block : public swappable_block<ValueType, BlockSideLength* BlockSideLength>
{
public:
    typedef typename swappable_block<ValueType, BlockSideLength* BlockSideLength>::internal_block_type internal_block_type;

    using swappable_block<ValueType, BlockSideLength* BlockSideLength>::get_internal_block;

    //! zero-fill the acquired internal block (the "default" content)
    void fill_default()
    {
        // get_internal_block checks acquired
        internal_block_type& data = get_internal_block();
        #if STXXL_PARALLEL
        #pragma omp parallel for
        #endif
        for (int_type row = 0; row < int_type(BlockSideLength); ++row)
            for (int_type col = 0; col < int_type(BlockSideLength); ++col)
                data[row * BlockSideLength + col] = 0;
    }
};

//! External container for a (sub)matrix. Not intended for direct use.
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
//!
//! Stores blocks only, so all measures (height, width, row, col) are in blocks.
template <typename ValueType, unsigned BlockSideLength>
class swappable_block_matrix : public atomic_counted_object
{
public:
    typedef int_type size_type;
    typedef int_type elem_size_type;
    typedef block_scheduler<matrix_swappable_block<ValueType, BlockSideLength> > block_scheduler_type;
    typedef typename block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type;
    typedef std::vector<swappable_block_identifier_type> blocks_type;
    typedef matrix_local::matrix_operations<ValueType, BlockSideLength> Ops;

    block_scheduler_type& bs;

private:
    // assigning is not allowed
    swappable_block_matrix& operator = (const swappable_block_matrix& other);

protected:
    //! height of the matrix in blocks
    size_type height,
    //! width of the matrix in blocks
              width,
    //! height copied from supermatrix in blocks
    //! (rows [0, height_from_supermatrix) share their blocks with the
    //! supermatrix and must not be freed by this object's destructor)
              height_from_supermatrix,
    //! width copied from supermatrix in blocks
              width_from_supermatrix;
    //! the matrice's blocks in row-major
    blocks_type blocks;
    //! if the elements in each block are in col-major instead of row-major
    bool elements_in_blocks_transposed;

    //! get identifier of the block at (row, col)
    swappable_block_identifier_type & bl(const size_type row, const size_type col)
    { return blocks[row * width + col]; }

public:
    //! Create an empty swappable_block_matrix of given dimensions.
    swappable_block_matrix(block_scheduler_type& bs,
            const size_type height_in_blocks, const size_type width_in_blocks, const bool transposed = false)
        : bs(bs),
          height(height_in_blocks),
          width(width_in_blocks),
          height_from_supermatrix(0),
          width_from_supermatrix(0),
          blocks(height * width),
          elements_in_blocks_transposed(transposed)
    {
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
    }

    //! Create swappable_block_matrix of given dimensions that
    //! represents the submatrix of supermatrix starting at (from_row_in_blocks, from_col_in_blocks).
    //!
    //! If supermatrix is not large enough, the submatrix is padded with empty blocks.
    //! The supermatrix must not be destructed or transposed before the submatrix is destructed.
    swappable_block_matrix(const swappable_block_matrix& supermatrix,
            const size_type height_in_blocks, const size_type width_in_blocks,
            const size_type from_row_in_blocks, const size_type from_col_in_blocks)
        : bs(supermatrix.bs),
          height(height_in_blocks),
          width(width_in_blocks),
          height_from_supermatrix(std::min(supermatrix.height - from_row_in_blocks, height)),
          width_from_supermatrix(std::min(supermatrix.width - from_col_in_blocks, width)),
          blocks(height * width),
          elements_in_blocks_transposed(supermatrix.elements_in_blocks_transposed)
    {
        // shared region: reference the supermatrix blocks; padding region:
        // allocate fresh (zero-interpreting) blocks owned by this object
        for (size_type row = 0; row < height_from_supermatrix; ++row)
        {
            for (size_type col = 0; col < width_from_supermatrix; ++col)
                bl(row, col) = supermatrix.block(row + from_row_in_blocks, col + from_col_in_blocks);
            for (size_type col = width_from_supermatrix; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
        }
        for (size_type row = height_from_supermatrix; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
    }

    //! Create swappable_block_matrix that represents the combination matrix ul ur dl dr.
    //!
    //! The submatrices are assumed to be of fitting dimensions and equal transposition.
    //! The submatrices must not be destructed or transposed before the matrix is destructed.
    swappable_block_matrix(const swappable_block_matrix& ul, const swappable_block_matrix& ur,
                           const swappable_block_matrix& dl, const swappable_block_matrix& dr)
        : bs(ul.bs),
          height(ul.height + dl.height),
          width(ul.width + ur.width),
          height_from_supermatrix(height),
          width_from_supermatrix(width),
          blocks(height * width),
          elements_in_blocks_transposed(ul.elements_in_blocks_transposed)
    {
        // all blocks are borrowed from the four quadrant matrices; since
        // height_from_supermatrix == height and width_from_supermatrix == width,
        // the destructor frees none of them
        for (size_type row = 0; row < ul.height; ++row)
        {
            for (size_type col = 0; col < ul.width; ++col)
                bl(row, col) = ul.block(row, col);
            for (size_type col = ul.width; col < width; ++col)
                bl(row, col) = ur.block(row, col - ul.width);
        }
        for (size_type row = ul.height; row < height; ++row)
        {
            for (size_type col = 0; col < ul.width; ++col)
                bl(row, col) = dl.block(row - ul.height, col);
            for (size_type col = ul.width; col < width; ++col)
                bl(row, col) = dr.block(row - ul.height, col - ul.width);
        }
    }

    //! deep copy: allocates fresh blocks and adds the other matrix into them
    swappable_block_matrix(const swappable_block_matrix& other)
        : atomic_counted_object(other),
          bs(other.bs),
          height(other.height),
          width(other.width),
          height_from_supermatrix(0),
          width_from_supermatrix(0),
          blocks(height * width),
          elements_in_blocks_transposed(false)
    {
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
        // 0 + other is copying
        Ops::element_op(*this, other, typename Ops::addition());
    }

    //! frees only the blocks this object owns (allocated, not borrowed
    //! from a supermatrix — see height_from_supermatrix/width_from_supermatrix)
    ~swappable_block_matrix()
    {
        for (size_type row = 0; row < height_from_supermatrix; ++row)
        {
            for (size_type col = width_from_supermatrix; col < width; ++col)
                bs.free_swappable_block(bl(row, col));
        }
        for (size_type row = height_from_supermatrix; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bs.free_swappable_block(bl(row, col));
    }

    //! block coordinate that contains the given element index
    static size_type block_index_from_elem(elem_size_type index)
    { return index / BlockSideLength; }

    //! offset of the given element index within its block (one dimension)
    static int_type elem_index_in_block_from_elem(elem_size_type index)
    { return index % BlockSideLength; }

    // regards transposed
    //! linear offset of element (row, col) inside its block, honoring
    //! the col-major layout when is_transposed()
    int_type elem_index_in_block_from_elem(elem_size_type row, elem_size_type col) const
    {
        return (is_transposed())
               ? row % BlockSideLength + col % BlockSideLength * BlockSideLength
               : row % BlockSideLength * BlockSideLength + col % BlockSideLength;
    }

    //! get identifier of the block at (row, col)
    const swappable_block_identifier_type & block(const size_type row, const size_type col) const
    { return blocks[row * width + col]; }

    //! get identifier of the block at (row, col)
    const swappable_block_identifier_type& operator () (const size_type row, const size_type col) const
    { return block(row, col); }

    const size_type & get_height() const
    { return height; }

    const size_type & get_width() const
    { return width; }

    //! if the elements inside the blocks are in transposed order i.e. column-major
    const bool & is_transposed() const
    { return elements_in_blocks_transposed; }

    //! O(height*width) transpose of the block grid; element data inside the
    //! blocks is not moved, only the transposition flag is flipped
    void transpose()
    {
        // transpose matrix of blocks
        blocks_type bn(blocks.size());
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bn[col * height + row] = bl(row, col);
        bn.swap(blocks);
        // swap dimensions
        std::swap(height, width);
        std::swap(height_from_supermatrix, width_from_supermatrix);
        elements_in_blocks_transposed = ! elements_in_blocks_transposed;
    }

    //! deinitialize all blocks; uninitialized blocks read as zero
    void set_zero()
    {
        for (typename blocks_type::iterator it = blocks.begin(); it != blocks.end(); ++it)
            bs.deinitialize(*it);
    }
};

//! general iterator type that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//!
//! \tparam BlockSideLength side length of a matrix block
template <typename ValueType, unsigned BlockSideLength>
class matrix_iterator
{
protected:
    typedef matrix<ValueType, BlockSideLength> matrix_type;
    typedef typename matrix_type::swappable_block_matrix_type swappable_block_matrix_type;
    typedef typename matrix_type::block_scheduler_type block_scheduler_type;
    typedef typename block_scheduler_type::internal_block_type internal_block_type;
    typedef typename matrix_type::elem_size_type elem_size_type;
    typedef typename matrix_type::block_size_type block_size_type;

    template <typename VT, unsigned BSL>
    friend class matrix;
    template <typename VT, unsigned BSL>
    friend class const_matrix_iterator;

    matrix_type* m;
    elem_size_type current_row, // \ both indices == -1 <=> empty iterator
                   current_col; // /
    block_size_type current_block_row,
                    current_block_col;
    internal_block_type* current_iblock; // NULL if block is not acquired

    //! pin the block under the cursor in internal memory (idempotent)
    void acquire_current_iblock()
    {
        if (! current_iblock)
            current_iblock = &m->data->bs.acquire(m->data->block(current_block_row, current_block_col));
    }

    //! unpin the currently acquired block, marking it dirty (written)
    void release_current_iblock()
    {
        if (current_iblock)
        {
            m->data->bs.release(m->data->block(current_block_row, current_block_col), true);
            current_iblock = 0;
        }
    }

    //! create iterator pointing to given row and col
    matrix_iterator(matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col)
        : m(&matrix),
          current_row(start_row),
          current_col(start_col),
          current_block_row(m->data->block_index_from_elem(start_row)),
          current_block_col(m->data->block_index_from_elem(start_col)),
          current_iblock(0) { }

    //! create empty iterator
    matrix_iterator(matrix_type& matrix)
        : m(&matrix),
          current_row(-1), // empty iterator
          current_col(-1),
          current_block_row(-1),
          current_block_col(-1),
          current_iblock(0) { }

    //! release any pinned block and reset to the empty state
    void set_empty()
    {
        release_current_iblock();
        current_row = -1;
        current_col = -1;
        current_block_row = -1;
        current_block_col = -1;
    }

public:
    matrix_iterator(const matrix_iterator& other)
        : m(other.m),
          current_row(other.current_row),
          current_col(other.current_col),
          current_block_row(other.current_block_row),
          current_block_col(other.current_block_col),
          current_iblock(0)
    {
        // mirror the acquisition state of the source iterator
        if (other.current_iblock)
            acquire_current_iblock();
    }

    matrix_iterator& operator = (const matrix_iterator& other)
    {
        // set_pos first releases any block held on the old position
        set_pos(other.current_row, other.current_col);
        m = other.m;
        if (other.current_iblock)
            acquire_current_iblock();
        return *this;
    }

    ~matrix_iterator()
    { release_current_iblock(); }

    //! move to new_row; releases the pinned block only on a block change
    void set_row(const elem_size_type new_row)
    {
        const block_size_type new_block_row = m->data->block_index_from_elem(new_row);
        if (new_block_row != current_block_row)
        {
            release_current_iblock();
            current_block_row = new_block_row;
        }
        current_row = new_row;
    }

    //! move to new_col; releases the pinned block only on a block change
    void set_col(const elem_size_type new_col)
    {
        const block_size_type new_block_col = m->data->block_index_from_elem(new_col);
        if (new_block_col != current_block_col)
        {
            release_current_iblock();
            current_block_col = new_block_col;
        }
        current_col = new_col;
    }

    void set_pos(const elem_size_type new_row, const elem_size_type new_col)
    {
        const block_size_type new_block_row = m->data->block_index_from_elem(new_row),
                              new_block_col = m->data->block_index_from_elem(new_col);
        if (new_block_col != current_block_col || new_block_row != current_block_row)
        {
            release_current_iblock();
            current_block_row = new_block_row;
            current_block_col = new_block_col;
        }
        current_row = new_row;
        current_col = new_col;
    }

    void set_pos(const std::pair<elem_size_type, elem_size_type> new_pos)
    { set_pos(new_pos.first, new_pos.second); }

    const elem_size_type & get_row() const
    { return current_row; }

    const elem_size_type & get_col() const
    { return current_col; }

    std::pair<elem_size_type, elem_size_type> get_pos() const
    { return std::make_pair(current_row, current_col); }

    bool empty() const
    { return current_row == -1 && current_col == -1; }

    operator bool () const
    { return ! empty(); }

    bool operator == (const matrix_iterator& other) const
    { return current_row == other.current_row && current_col == other.current_col && m == other.m; }

    //! Returns reference access to the element referenced by the iterator.
    //! The reference is only valid so long as the iterator is not moved.
    ValueType& operator * ()
    {
        acquire_current_iblock();
        return (*current_iblock)[m->data->elem_index_in_block_from_elem(current_row, current_col)];
    }
};

//! row-major iterator that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
template <typename ValueType, unsigned BlockSideLength>
class matrix_row_major_iterator : public matrix_iterator<ValueType, BlockSideLength>
{
protected:
    typedef matrix_iterator<ValueType, BlockSideLength> matrix_iterator_type;
    typedef typename matrix_iterator_type::matrix_type matrix_type;
    typedef typename matrix_iterator_type::elem_size_type elem_size_type;

    template <typename VT, unsigned BSL>
    friend class matrix;

    using matrix_iterator_type::m;
    using matrix_iterator_type::set_empty;

    //! create iterator pointing to given row and col
    matrix_row_major_iterator(matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col)
        : matrix_iterator_type(matrix, start_row, start_col) { }

    //! create empty iterator
    matrix_row_major_iterator(matrix_type& matrix)
        : matrix_iterator_type(matrix) { }

public:
    //! convert from matrix_iterator
    matrix_row_major_iterator(const matrix_iterator_type& matrix_iterator)
        : matrix_iterator_type(matrix_iterator) { }

    // Has to be not empty, else behavior is undefined.
    matrix_row_major_iterator& operator ++ ()
    {
        if (get_col() + 1 < m->get_width())
            // => not at the end of row, move right
            set_col(get_col() + 1);
        else if (get_row() + 1 < m->get_height())
            // => at end of row but not last row, move to beginning of next row
            set_pos(get_row() + 1, 0);
        else
            // => at end of matrix, set to empty-state
            set_empty();
        return *this;
    }

    // Has to be not empty, else behavior is undefined.
    matrix_row_major_iterator& operator -- ()
    {
        if (get_col() - 1 >= 0)
            // => not at the beginning of row, move left
            set_col(get_col() - 1);
        else if (get_row() - 1 >= 0)
            // => at beginning of row but not first row, move to end of previous row
            set_pos(get_row() - 1, m->get_width() - 1);
        else
            // => at beginning of matrix, set to empty-state
            set_empty();
        return *this;
    }

    using matrix_iterator_type::get_row;
    using matrix_iterator_type::get_col;
    using matrix_iterator_type::set_col;
    using matrix_iterator_type::set_pos;
};

//! column-major iterator that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
template <typename ValueType, unsigned BlockSideLength>
class matrix_col_major_iterator : public matrix_iterator<ValueType, BlockSideLength>
{
protected:
    typedef matrix_iterator<ValueType, BlockSideLength> matrix_iterator_type;
    typedef typename matrix_iterator_type::matrix_type matrix_type;
    typedef typename matrix_iterator_type::elem_size_type elem_size_type;

    template <typename VT, unsigned BSL>
    friend class matrix;

    using matrix_iterator_type::m;
    using matrix_iterator_type::set_empty;

    //! create iterator pointing to given row and col
    matrix_col_major_iterator(matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col)
        : matrix_iterator_type(matrix, start_row, start_col) { }

    //!
create empty iterator matrix_col_major_iterator(matrix_type& matrix) : matrix_iterator_type(matrix) { } public: //! convert from matrix_iterator matrix_col_major_iterator(const matrix_iterator_type& matrix_iterator) : matrix_iterator_type(matrix_iterator) { } // Has to be not empty, else behavior is undefined. matrix_col_major_iterator& operator ++ () { if (get_row() + 1 < m->get_height()) // => not at the end of col, move down set_row(get_row() + 1); else if (get_col() + 1 < m->get_width()) // => at end of col but not last col, move to beginning of next col set_pos(0, get_col() + 1); else // => at end of matrix, set to empty-state set_empty(); return *this; } // Has to be not empty, else behavior is undefined. matrix_col_major_iterator& operator -- () { if (get_row() - 1 >= 0) // => not at the beginning of col, move up set_row(get_row() - 1); else if (get_col() - 1 >= 0) // => at beginning of col but not first col, move to end of previous col set_pos(m->get_height() - 1, get_col() - 1); else // => at beginning of matrix, set to empty-state set_empty(); return *this; } using matrix_iterator_type::get_row; using matrix_iterator_type::get_col; using matrix_iterator_type::set_row; using matrix_iterator_type::set_pos; }; //! general const_iterator type that points to single elements inside a matrix //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! 
\tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class const_matrix_iterator { protected: typedef matrix<ValueType, BlockSideLength> matrix_type; typedef typename matrix_type::swappable_block_matrix_type swappable_block_matrix_type; typedef typename matrix_type::block_scheduler_type block_scheduler_type; typedef typename block_scheduler_type::internal_block_type internal_block_type; typedef typename matrix_type::elem_size_type elem_size_type; typedef typename matrix_type::block_size_type block_size_type; template <typename VT, unsigned BSL> friend class matrix; const matrix_type* m; elem_size_type current_row, // \ both indices == -1 <=> empty iterator current_col; // / block_size_type current_block_row, current_block_col; internal_block_type* current_iblock; // NULL if block is not acquired void acquire_current_iblock() { if (! current_iblock) current_iblock = &m->data->bs.acquire(m->data->block(current_block_row, current_block_col)); } void release_current_iblock() { if (current_iblock) { m->data->bs.release(m->data->block(current_block_row, current_block_col), false); current_iblock = 0; } } //! create iterator pointing to given row and col const_matrix_iterator(const matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : m(&matrix), current_row(start_row), current_col(start_col), current_block_row(m->data->block_index_from_elem(start_row)), current_block_col(m->data->block_index_from_elem(start_col)), current_iblock(0) { } //! 
create empty iterator const_matrix_iterator(const matrix_type& matrix) : m(&matrix), current_row(-1), // empty iterator current_col(-1), current_block_row(-1), current_block_col(-1), current_iblock(0) { } void set_empty() { release_current_iblock(); current_row = -1; current_col = -1; current_block_row = -1; current_block_col = -1; } public: const_matrix_iterator(const matrix_iterator<ValueType, BlockSideLength>& other) : m(other.m), current_row(other.current_row), current_col(other.current_col), current_block_row(other.current_block_row), current_block_col(other.current_block_col), current_iblock(0) { if (other.current_iblock) acquire_current_iblock(); } const_matrix_iterator(const const_matrix_iterator& other) : m(other.m), current_row(other.current_row), current_col(other.current_col), current_block_row(other.current_block_row), current_block_col(other.current_block_col), current_iblock(0) { if (other.current_iblock) acquire_current_iblock(); } const_matrix_iterator& operator = (const const_matrix_iterator& other) { set_pos(other.current_row, other.current_col); m = other.m; if (other.current_iblock) acquire_current_iblock(); return *this; } ~const_matrix_iterator() { release_current_iblock(); } void set_row(const elem_size_type new_row) { const block_size_type new_block_row = m->data->block_index_from_elem(new_row); if (new_block_row != current_block_row) { release_current_iblock(); current_block_row = new_block_row; } current_row = new_row; } void set_col(const elem_size_type new_col) { const block_size_type new_block_col = m->data->block_index_from_elem(new_col); if (new_block_col != current_block_col) { release_current_iblock(); current_block_col = new_block_col; } current_col = new_col; } void set_pos(const elem_size_type new_row, const elem_size_type new_col) { const block_size_type new_block_row = m->data->block_index_from_elem(new_row), new_block_col = m->data->block_index_from_elem(new_col); if (new_block_col != current_block_col || new_block_row != 
current_block_row) { release_current_iblock(); current_block_row = new_block_row; current_block_col = new_block_col; } current_row = new_row; current_col = new_col; } void set_pos(const std::pair<elem_size_type, elem_size_type> new_pos) { set_pos(new_pos.first, new_pos.second); } const elem_size_type & get_row() const { return current_row; } const elem_size_type & get_col() const { return current_col; } std::pair<elem_size_type, elem_size_type> get_pos() const { return std::make_pair(current_row, current_col); } bool empty() const { return current_row == -1 && current_col == -1; } operator bool () const { return ! empty(); } bool operator == (const const_matrix_iterator& other) const { return current_row == other.current_row && current_col == other.current_col && m == other.m; } //! Returns reference access to the element referenced by the iterator. //! The reference is only valid so long as the iterator is not moved. const ValueType& operator * () { acquire_current_iblock(); return (*current_iblock)[m->data->elem_index_in_block_from_elem(current_row, current_col)]; } }; //! row-major const_iterator that points to single elements inside a matrix //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! \tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class const_matrix_row_major_iterator : public const_matrix_iterator<ValueType, BlockSideLength> { protected: typedef const_matrix_iterator<ValueType, BlockSideLength> const_matrix_iterator_type; typedef typename const_matrix_iterator_type::matrix_type matrix_type; typedef typename const_matrix_iterator_type::elem_size_type elem_size_type; template <typename VT, unsigned BSL> friend class matrix; using const_matrix_iterator_type::m; using const_matrix_iterator_type::set_empty; //! 
create iterator pointing to given row and col const_matrix_row_major_iterator(const matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : const_matrix_iterator_type(matrix, start_row, start_col) { } //! create empty iterator const_matrix_row_major_iterator(const matrix_type& matrix) : const_matrix_iterator_type(matrix) { } public: //! convert from matrix_iterator const_matrix_row_major_iterator(const const_matrix_row_major_iterator& matrix_iterator) : const_matrix_iterator_type(matrix_iterator) { } //! convert from matrix_iterator const_matrix_row_major_iterator(const const_matrix_iterator_type& matrix_iterator) : const_matrix_iterator_type(matrix_iterator) { } // Has to be not empty, else behavior is undefined. const_matrix_row_major_iterator& operator ++ () { if (get_col() + 1 < m->get_width()) // => not matrix_row_major_iterator the end of row, move right set_col(get_col() + 1); else if (get_row() + 1 < m->get_height()) // => at end of row but not last row, move to beginning of next row set_pos(get_row() + 1, 0); else // => at end of matrix, set to empty-state set_empty(); return *this; } // Has to be not empty, else behavior is undefined. const_matrix_row_major_iterator& operator -- () { if (get_col() - 1 >= 0) // => not at the beginning of row, move left set_col(get_col() - 1); else if (get_row() - 1 >= 0) // => at beginning of row but not first row, move to end of previous row set_pos(get_row() - 1, m->get_width() - 1); else // => at beginning of matrix, set to empty-state set_empty(); return *this; } using const_matrix_iterator_type::get_row; using const_matrix_iterator_type::get_col; using const_matrix_iterator_type::set_col; using const_matrix_iterator_type::set_pos; }; //! column-major const_iterator that points to single elements inside a matrix //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! 
\tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class const_matrix_col_major_iterator : public const_matrix_iterator<ValueType, BlockSideLength> { protected: typedef const_matrix_iterator<ValueType, BlockSideLength> const_matrix_iterator_type; typedef typename const_matrix_iterator_type::matrix_type matrix_type; typedef typename const_matrix_iterator_type::elem_size_type elem_size_type; template <typename VT, unsigned BSL> friend class matrix; using const_matrix_iterator_type::m; using const_matrix_iterator_type::set_empty; //! create iterator pointing to given row and col const_matrix_col_major_iterator(const matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : const_matrix_iterator_type(matrix, start_row, start_col) { } //! create empty iterator const_matrix_col_major_iterator(const matrix_type& matrix) : const_matrix_iterator_type(matrix) { } public: //! convert from matrix_iterator const_matrix_col_major_iterator(const matrix_iterator<ValueType, BlockSideLength>& matrix_iterator) : const_matrix_iterator_type(matrix_iterator) { } //! convert from matrix_iterator const_matrix_col_major_iterator(const const_matrix_iterator_type& matrix_iterator) : const_matrix_iterator_type(matrix_iterator) { } // Has to be not empty, else behavior is undefined. const_matrix_col_major_iterator& operator ++ () { if (get_row() + 1 < m->get_height()) // => not at the end of col, move down set_row(get_row() + 1); else if (get_col() + 1 < m->get_width()) // => at end of col but not last col, move to beginning of next col set_pos(0, get_col() + 1); else // => at end of matrix, set to empty-state set_empty(); return *this; } // Has to be not empty, else behavior is undefined. 
const_matrix_col_major_iterator& operator -- () { if (get_row() - 1 >= 0) // => not at the beginning of col, move up set_row(get_row() - 1); else if (get_col() - 1 >= 0) // => at beginning of col but not first col, move to end of previous col set_pos(m->get_height() - 1, get_col() - 1); else // => at beginning of matrix, set to empty-state set_empty(); return *this; } using const_matrix_iterator_type::get_row; using const_matrix_iterator_type::get_col; using const_matrix_iterator_type::set_row; using const_matrix_iterator_type::set_pos; }; //! External matrix container. \n //! <b> Introduction </b> to matrix container: see \ref tutorial_matrix tutorial. \n //! <b> Design and Internals </b> of matrix container: see \ref design_matrix. //! //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! \tparam BlockSideLength side length of a matrix block //! //! Divides the matrix in square submatrices (blocks). //! Blocks can be swapped individually to and from external memory. //! They are only swapped if necessary to minimize I/O. 
template <typename ValueType, unsigned BlockSideLength>
class matrix
{
protected:
    typedef matrix<ValueType, BlockSideLength> matrix_type;
    typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
    // counting_ptr enables copy-on-write sharing of the block data between
    // matrix copies; non-const access paths call data.unify() first.
    typedef counting_ptr<swappable_block_matrix_type> swappable_block_matrix_pointer_type;
    typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type;
    typedef typename swappable_block_matrix_type::size_type block_size_type;
    typedef typename swappable_block_matrix_type::elem_size_type elem_size_type;
    typedef matrix_local::matrix_operations<ValueType, BlockSideLength> Ops;
    typedef matrix_swappable_block<ValueType, BlockSideLength> swappable_block_type;

public:
    typedef matrix_iterator<ValueType, BlockSideLength> iterator;
    typedef const_matrix_iterator<ValueType, BlockSideLength> const_iterator;
    typedef matrix_row_major_iterator<ValueType, BlockSideLength> row_major_iterator;
    typedef matrix_col_major_iterator<ValueType, BlockSideLength> col_major_iterator;
    typedef const_matrix_row_major_iterator<ValueType, BlockSideLength> const_row_major_iterator;
    typedef const_matrix_col_major_iterator<ValueType, BlockSideLength> const_col_major_iterator;
    typedef column_vector<ValueType> column_vector_type;
    typedef row_vector<ValueType> row_vector_type;

protected:
    template <typename VT, unsigned BSL>
    friend class matrix_iterator;

    template <typename VT, unsigned BSL>
    friend class const_matrix_iterator;

    elem_size_type height, width;
    swappable_block_matrix_pointer_type data;

public:
    //! \name Constructors/Destructors
    //! \{

    //! Creates a new matrix of given dimensions. Elements' values are set to zero.
    //! \param bs block scheduler used
    //! \param height height of the created matrix
    //! \param width width of the created matrix
    matrix(block_scheduler_type& bs, const elem_size_type height, const elem_size_type width)
        : height(height),
          width(width),
          data(
              new swappable_block_matrix_type(
                  bs, div_ceil(height, BlockSideLength), div_ceil(width, BlockSideLength))
              )
    { }

    //! Constructs the matrix as the outer product of a column and a row vector.
    matrix(block_scheduler_type& bs, const column_vector_type& left, const row_vector_type& right)
        : height((elem_size_type)left.size()),
          width((elem_size_type)right.size()),
          data(
              new swappable_block_matrix_type(
                  bs, div_ceil(height, BlockSideLength), div_ceil(width, BlockSideLength))
              )
    { Ops::recursive_matrix_from_vectors(*data, left, right); }

    ~matrix() { }

    //! \}

    //! \name Capacity
    //! \{

    const elem_size_type & get_height() const
    { return height; }

    const elem_size_type & get_width() const
    { return width; }

    //! \}

    //! \name Iterators
    //! \{

    // Mutable accessors call data.unify() first — presumably this makes the
    // copy-on-write data uniquely owned before handing out write access.
    iterator begin()
    {
        data.unify();
        return iterator(*this, 0, 0);
    }
    const_iterator begin() const
    { return const_iterator(*this, 0, 0); }
    const_iterator cbegin() const
    { return const_iterator(*this, 0, 0); }

    iterator end()
    {
        data.unify();
        return iterator(*this);
    }
    const_iterator end() const
    { return const_iterator(*this); }
    const_iterator cend() const
    { return const_iterator(*this); }

    const_iterator operator () (const elem_size_type row, const elem_size_type col) const
    { return const_iterator(*this, row, col); }

    iterator operator () (const elem_size_type row, const elem_size_type col)
    {
        data.unify();
        return iterator(*this, row, col);
    }

    //! \}

    //! \name Modifiers
    //! \{

    void transpose()
    {
        data.unify();
        data->transpose();
        std::swap(height, width);
    }

    void set_zero()
    {
        // If the data is shared, replace it with a fresh zero matrix instead
        // of zeroing the shared copy.
        if (data.unique())
            data->set_zero();
        else
            data = new swappable_block_matrix_type
                       (data->bs, div_ceil(height, BlockSideLength), div_ceil(width, BlockSideLength));
    }

    //! \}

    //! \name Operations
    //! \{

    matrix_type operator + (const matrix_type& right) const
    {
        assert(height == right.height && width == right.width);
        matrix_type res(data->bs, height, width);
        Ops::element_op(*res.data, *data, *right.data, typename Ops::addition()); // more efficient than copying this and then adding right
        return res;
    }

    matrix_type operator - (const matrix_type& right) const
    {
        assert(height == right.height && width == right.width);
        matrix_type res(data->bs, height, width);
        Ops::element_op(*res.data, *data, *right.data, typename Ops::subtraction()); // more efficient than copying this and then subtracting right
        return res;
    }

    matrix_type operator * (const matrix_type& right) const
    { return multiply(right); }

    matrix_type operator * (const ValueType scalar) const
    {
        matrix_type res(data->bs, height, width);
        Ops::element_op(*res.data, *data, typename Ops::scalar_multiplication(scalar));
        return res;
    }

    matrix_type& operator += (const matrix_type& right)
    {
        assert(height == right.height && width == right.width);
        data.unify();
        Ops::element_op(*data, *right.data, typename Ops::addition());
        return *this;
    }

    matrix_type& operator -= (const matrix_type& right)
    {
        assert(height == right.height && width == right.width);
        data.unify();
        Ops::element_op(*data, *right.data, typename Ops::subtraction());
        return *this;
    }

    matrix_type& operator *= (const matrix_type& right)
    { return *this = operator * (right); } // implicitly unifies by constructing a result-matrix

    matrix_type& operator *= (const ValueType scalar)
    {
        data.unify();
        Ops::element_op(*data, typename Ops::scalar_multiplication(scalar));
        return *this;
    }

    //! Matrix-vector product (this * right).
    column_vector_type operator * (const column_vector_type& right) const
    {
        assert(elem_size_type(right.size()) == width);
        column_vector_type res(height);
        res.set_zero();
        Ops::recursive_matrix_col_vector_multiply_and_add(*data, right, res);
        return res;
    }

    //! Vector-matrix product (left * this).
    row_vector_type multiply_from_left(const row_vector_type& left) const
    {
        assert(elem_size_type(left.size()) == height);
        row_vector_type res(width);
        res.set_zero();
        Ops::recursive_matrix_row_vector_multiply_and_add(left, *data, res);
        return res;
    }

    //! multiply with another matrix
    //! \param right matrix to multiply with
    //! \param multiplication_algorithm allows to choose the applied algorithm
    //! \param scheduling_algorithm allows to choose the applied algorithm
    //!
    //! Available algorithms are: \n
    //!    0: naive_multiply_and_add (I/O inefficient, slow) \n
    //!    1: recursive_multiply_and_add (recommended, default, stable time and I/O complexity) \n
    //!    2: strassen_winograd_multiply_and_add (sometimes fast but unstable time and I/O complexity) \n
    //!    3: multi_level_strassen_winograd_multiply_and_add (sometimes fast but unstable time and I/O complexity) \n
    //!    4: strassen_winograd_multiply, optimized pre- and postadditions (sometimes fast but unstable time and I/O complexity) \n
    //!    5: strassen_winograd_multiply_and_add_interleaved, optimized preadditions (sometimes fast but unstable time and I/O complexity) \n
    //!    6: multi_level_strassen_winograd_multiply_and_add_block_grained (sometimes fast but unstable time and I/O complexity)
    matrix_type multiply(const matrix_type& right,
                         const int_type multiplication_algorithm = 1,
                         const int_type scheduling_algorithm = 2) const
    {
        assert(width == right.height);
        assert(&data->bs == &right.data->bs);
        matrix_type res(data->bs, height, right.width);

        if (scheduling_algorithm > 0)
        {
            // all offline algos need a simulation-run
            // (the scheduler only records the access pattern here; the
            // multiplication below is not executed on real data)
            delete data->bs.switch_algorithm_to(
                new block_scheduler_algorithm_simulation<swappable_block_type>(data->bs)
                );
            switch (multiplication_algorithm)
            {
            case 0:
                Ops::naive_multiply_and_add(*data, *right.data, *res.data);
                break;
            case 1:
                Ops::recursive_multiply_and_add(*data, *right.data, *res.data);
                break;
            case 2:
                Ops::strassen_winograd_multiply_and_add(*data, *right.data, *res.data);
                break;
            case 3:
                Ops::multi_level_strassen_winograd_multiply_and_add(*data, *right.data, *res.data);
                break;
            case 4:
                Ops::strassen_winograd_multiply(*data, *right.data, *res.data);
                break;
            case 5:
                Ops::strassen_winograd_multiply_and_add_interleaved(*data, *right.data, *res.data);
                break;
            case 6:
                Ops::multi_level_strassen_winograd_multiply_and_add_block_grained(*data, *right.data, *res.data);
                break;
            default:
                STXXL_ERRMSG("invalid multiplication-algorithm number");
                break;
            }
        }
        // Install the requested scheduling algorithm for the real run.
        switch (scheduling_algorithm)
        {
        case 0:
            delete data->bs.switch_algorithm_to(
                new block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs)
                );
            break;
        case 1:
            delete data->bs.switch_algorithm_to(
                new block_scheduler_algorithm_offline_lfd<swappable_block_type>(data->bs)
                );
            break;
        case 2:
            delete data->bs.switch_algorithm_to(
                new block_scheduler_algorithm_offline_lru_prefetching<swappable_block_type>(data->bs)
                );
            break;
        default:
            STXXL_ERRMSG("invalid scheduling-algorithm number");
        }
        // The real multiplication run.
        switch (multiplication_algorithm)
        {
        case 0:
            Ops::naive_multiply_and_add(*data, *right.data, *res.data);
            break;
        case 1:
            Ops::recursive_multiply_and_add(*data, *right.data, *res.data);
            break;
        case 2:
            Ops::strassen_winograd_multiply_and_add(*data, *right.data, *res.data);
            break;
        case 3:
            Ops::multi_level_strassen_winograd_multiply_and_add(*data, *right.data, *res.data);
            break;
        case 4:
            Ops::strassen_winograd_multiply(*data, *right.data, *res.data);
            break;
        case 5:
            Ops::strassen_winograd_multiply_and_add_interleaved(*data, *right.data, *res.data);
            break;
        case 6:
            Ops::multi_level_strassen_winograd_multiply_and_add_block_grained(*data, *right.data, *res.data);
            break;
        default:
            STXXL_ERRMSG("invalid multiplication-algorithm number");
            break;
        }
        // Restore the default online scheduler.
        delete data->bs.switch_algorithm_to(
            new block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs)
            );
        return res;
    }

    //! Use internal memory multiplication. Designated for testing. May exceed memory limitations.
    matrix_type multiply_internal(const matrix_type& right,
                                  const int_type scheduling_algorithm = 2) const
    {
        assert(width == right.height);
        assert(&data->bs == &right.data->bs);
        matrix_type res(data->bs, height, right.width);

        if (scheduling_algorithm > 0)
        {
            // all offline algos need a simulation-run
            delete data->bs.switch_algorithm_to(
                new block_scheduler_algorithm_simulation<swappable_block_type>(data->bs)
                );
            multiply_internal(right, res);
        }
        switch (scheduling_algorithm)
        {
        case 0:
            delete data->bs.switch_algorithm_to(
                new block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs)
                );
            break;
        case 1:
            delete data->bs.switch_algorithm_to(
                new block_scheduler_algorithm_offline_lfd<swappable_block_type>(data->bs)
                );
            break;
        case 2:
            delete data->bs.switch_algorithm_to(
                new block_scheduler_algorithm_offline_lru_prefetching<swappable_block_type>(data->bs)
                );
            break;
        default:
            STXXL_ERRMSG("invalid scheduling-algorithm number");
        }
        multiply_internal(right, res);
        delete data->bs.switch_algorithm_to(
            new block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs)
            );
        return res;
    }

    //! \}

protected:
    //! Copies both operands into internal-memory arrays, multiplies via BLAS
    //! (if available), and copies the product back into res.
    void multiply_internal(const matrix_type& right, matrix_type& res) const
    {
        ValueType* A = new ValueType[height * width];
        ValueType* B = new ValueType[right.height * right.width];
        ValueType* C = new ValueType[res.height * res.width];
        ValueType* vit;
        vit = A;
        for (const_row_major_iterator mit = cbegin(); mit != cend(); ++mit, ++vit)
            *vit = *mit;
        vit = B;
        for (const_row_major_iterator mit = right.cbegin(); mit != right.cend(); ++mit, ++vit)
            *vit = *mit;
        if (! res.data->bs.is_simulating())
        {
#if STXXL_BLAS
            gemm_wrapper(height, width, res.width,
                         ValueType(1), false, A,
                         false, B,
                         ValueType(0), false, C);
#else
            assert(false /* internal multiplication is only available for testing with blas */);
#endif
        }
        vit = C;
        for (row_major_iterator mit = res.begin(); mit != res.end(); ++mit, ++vit)
            *mit = *vit;
        delete[] A;
        delete[] B;
        delete[] C;
    }
};

//! \}

STXXL_END_NAMESPACE

#endif // !STXXL_CONTAINERS_MATRIX_HEADER
// vim: et:ts=4:sw=4
parallel.c
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>

void shellsort(int arr[], int n);
int IsSort(int *array, int size);

/*
 * Benchmark driver: fills an array with random values, sorts it with the
 * gap-parallel shellsort below, prints the elapsed wall time, and verifies
 * the result.
 */
int main(int argc, char** argv)
{
    (void)argc;
    (void)argv;

    int size = 1500000, i, *arr;
    arr = malloc((size_t)size * sizeof *arr);
    if (arr == NULL) { /* bug fix: malloc result was unchecked */
        fprintf(stderr, "allocation failed\n");
        return 1;
    }

    srand((unsigned)time(NULL));
    for (i = 0; i < size; i++)
        arr[i] = rand() % size;

    double start, end;
    omp_set_num_threads(12);
    start = omp_get_wtime();
    shellsort(arr, size);
    end = omp_get_wtime();
    printf("Tempo: %.3f\n",end - start);

    if (IsSort(arr, size) == 1)
        printf("Result: Sorted\n");
    else
        printf("Result: Not Sorted\n");

    free(arr); /* bug fix: arr was leaked */
    return 0;
}

/*
 * Parallel shellsort: for each gap, the gap interleaved "groups"
 * (indices g, g+gap, g+2*gap, ...) are independent, so they are
 * gap-sorted by insertion sort in parallel, one group per iteration.
 * The final gap == 1 pass is a full insertion sort, which guarantees
 * the overall result is sorted.
 */
void shellsort(int arr[], int n)
{
    int gap, i, j, grupoId;
    for (gap = n / 2; gap > 0; gap /= 2) {
        #pragma omp parallel for private(j, i)
        for (grupoId = 0; grupoId < gap; grupoId++) {
            /* bug fix: the upper bound was `i < n - grupoId`, which skipped
             * the trailing element(s) of every group with grupoId > 0, so
             * intermediate gap passes left parts of each group unsorted
             * (masked only by the final gap==1 pass). Each group must run
             * to the end of the array. */
            for (i = gap + grupoId; i < n; i += gap) {
                int key = arr[i];
                j = i - gap;
                while (j >= 0 && arr[j] > key) {
                    arr[j + gap] = arr[j];
                    j -= gap;
                }
                arr[j + gap] = key;
            }
        }
    }
}

/* Returns 1 if array is in non-decreasing order, 0 otherwise. */
int IsSort(int *array, int size)
{
    int i;
    for (i = 1; i < size; i++)
        if (array[i - 1] > array[i])
            return 0;
    return 1;
}
GroupCloseness.h
/* * GroupCloseness.h * * Created on: 03.10.2016 * Author: elisabetta bergamini */ #ifndef GROUPCLOSENESS_H_ #define GROUPCLOSENESS_H_ #include <numeric> #include "../base/Algorithm.h" #include "../graph/Graph.h" namespace NetworKit { /** * @ingroup centrality */ class GroupCloseness : public Algorithm { public: /** * Finds the group of nodes with highest (group) closeness centrality. * The algorithm is the one proposed in Bergamini et al., ALENEX 2018 and * finds a solution that is a (1-1/e)-approximation of the optimum. * The worst-case running time of this approach is quadratic, but usually * much faster in practice. * * @param G An unweighted graph. * @param k Size of the group of nodes * @param H If equal 0, simply runs the algorithm proposed in Bergamini et * al.. If > 0, interrupts all BFSs after H iterations (suggested for very * large networks). * @ */ GroupCloseness(const Graph &G, count k = 1, count H = 0); /** * Computes the group with maximum closeness on the graph passed in the * constructor. */ void run(); /** * Returns group with maximum closeness. */ std::vector<node> groupMaxCloseness(); /** * Computes farness (i.e., inverse of the closeness) for a given group * (stopping after H iterations if H > 0). */ double computeFarness(std::vector<node> S, count H = std::numeric_limits<count>::max()); /** * Computes the score of a specific group. 
*/ double scoreOfGroup(const std::vector<node> &group) const; protected: edgeweight computeImprovement(node u, count n, Graph &G, count h); std::vector<count> newDistances(node u, count n, Graph &G, count h); Graph G; count k = 1; std::vector<count> D; count iters; count maxD; std::vector<count> d; std::vector<count> d1; std::vector<node> S; count H = 0; void checkGroup(const std::vector<node> &group) const; }; inline std::vector<node> GroupCloseness::groupMaxCloseness() { assureFinished(); return S; } inline void GroupCloseness::checkGroup(const std::vector<node> &group) const { const count z = G.upperNodeIdBound(); std::vector<bool> check(z, false); #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(group.size()); ++i) { node u = group[i]; if (u >= z) { std::stringstream ss; ss << "Error: node " << u << " is not in the graph.\n"; throw std::runtime_error(ss.str()); } if (check[u]) { std::stringstream ss; ss << "Error: the group contains duplicates of node " << u << ".\n"; throw std::runtime_error(ss.str()); } check[u] = true; } } inline double GroupCloseness::scoreOfGroup(const std::vector<node> &group) const { std::vector<bool> explored(G.upperNodeIdBound(), false); std::vector<count> distance(G.upperNodeIdBound(), 0); for (count i = 0; i < group.size(); ++i) { explored[group[i]] = true; } std::vector<node> queue; auto exploreNode = [&](node w, count d) { explored[w] = true; queue.push_back(w); distance[w] = d; }; count d = 1; for (auto u : group) { G.forNeighborsOf(u, [&](node v) { if (!explored[v]) { exploreNode(v, d); } }); } while (queue.size() > 0) { ++d; node u = queue.front(); queue.erase(queue.begin()); G.forNeighborsOf(u, [&](node v) { if (!explored[v]) { exploreNode(v, d); } }); } double dSum = std::accumulate(distance.begin(), distance.end(), 0); return dSum == 0 ? 0. : ((double)G.upperNodeIdBound() - (double)group.size()) / dSum; } } /* namespace NetworKit */ #endif /* GROUPCLOSENESS_H_ */
GB_binop__minus_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): to change anything here, edit the Generator/ template and
// regenerate; hand edits will be overwritten.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_01__minus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_uint32)
// A*D function (colscale):         GB (_AxD__minus_uint32)
// D*A function (rowscale):         GB (_DxB__minus_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_uint32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_uint32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_uint32)
// C=scalar+B                       GB (_bind1st__minus_uint32)
// C=scalar+B'                      GB (_bind1st_tran__minus_uint32)
// C=A+scalar                       GB (_bind2nd__minus_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__minus_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_UINT32 || GxB_NO_MINUS_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the braced block above already returns.
    // Harmless; left as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__minus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (x - aij) ;           \
}

GrB_Info GB (_bind1st_tran__minus_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (aij - y) ;           \
}

GrB_Info GB (_bind2nd_tran__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__minus_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): to change anything here, edit the Generator/ template and
// regenerate; hand edits will be overwritten.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__minus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__minus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_int32)
// A*D function (colscale):         GB (_AxD__minus_int32)
// D*A function (rowscale):         GB (_DxB__minus_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_int32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_int32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_int32)
// C=scalar+B                       GB (_bind1st__minus_int32)
// C=scalar+B'                      GB (_bind1st_tran__minus_int32)
// C=A+scalar                       GB (_bind2nd__minus_int32)
// C=A'+scalar                      GB (_bind2nd_tran__minus_int32)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the braced block above already returns.
    // Harmless; left as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int32_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x - aij) ;                   \
}

GrB_Info GB (_bind1st_tran__minus_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int32_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij - y) ;                   \
}

GrB_Info GB (_bind2nd_tran__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
trsm_x_csr_u_lo_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT m = A->rows; int num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++) { for (ALPHA_INT r = 0; r < m; r++) { ALPHA_Number temp; alpha_setzero(temp); for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++) { ALPHA_INT ac = A->col_indx[ai]; if (ac < r) { alpha_madde(temp, A->values[ai], y[out_y_col * ldy + ac]); } } ALPHA_Number t; alpha_setzero(t); alpha_mul(t, alpha, x[out_y_col * ldx + r]); alpha_sub(y[out_y_col * ldy + r], t, temp); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
neural_net.c
// // Created by vmachado on 2/11/20. // #include "neural_net.h" float accurracy(matrix *prob, int *indices, int *labels) { int correct = 0; const float threshold = 0.5f; for (int i = 0; i < prob->rows; i++) { if (prob->data[i * prob->columns + labels[ indices[i] ]] > threshold) { correct++; } } return (float)correct / (float)prob->rows; } matrix* prob_del(matrix* prob, int *indices, int *labels) { matrix *dprob = matrix_alloc(prob->rows, prob->columns); const float n_inv = 1.0f / (float)prob->rows; #pragma omp parallel for for (int i = 0; i < prob->rows; i++) { float *dp_row = dprob->data + i * prob->columns; float *p_row = prob->data + i * prob->columns; dp_row[ labels[ indices[i] ] ] = -1.0f; for (int j = 0; j < prob->columns; j++) { dp_row[j] = (dp_row[j] + p_row[j]) * n_inv; } } return dprob; } float loss(matrix* prob, int *indices, int *labels){ float out = .0f; #pragma omp parallel for reduction(+: out) for (int i = 0; i < prob->rows; i++) { out += logf(prob->data[i * prob->columns + labels[ indices[i] ]]); } return -out / (float)prob->rows; } float reg_loss(void **layers, int *layer_type, int len, float l_reg) { float out = .0f; for (int i = 0; i < len; i++) { if (layer_type[i] == FC) { matrix *w = ((fc_layer*)layers[i])->weights; matrix *temp = elemwise_mul(w, w); out += 0.5f * l_reg * sum_elem(temp); matrix_free(temp); } } return out; } int* random_indices(int samples) { int *indices = aalloc(sizeof(int) * samples); #pragma omp parallel for for (int i = 0; i < samples; i++) { indices[i] = i; } srand((int)time(0)); for (int i = samples - 1; i >= 1; i--) { int rand_index = rand() % (i + 1); int temp = indices[i]; indices[i] = indices[rand_index]; indices[rand_index] = temp; } return indices; } matrix* get_batch(int *indices, float **data_set, int batch_len, int data_dim) { matrix *batch = matrix_alloc(batch_len, data_dim); #pragma omp parallel for for (int i = 0; i < batch_len; i++) { register float *dest_ptr = batch->data + (i * data_dim); for (int j = 0; 
j < data_dim; j++) { dest_ptr[j] = data_set[ indices[i] ][j]; } } return batch; }
rose_doall_2.c
#include "omp.h" int i; int j; int a[100][100]; void foo() { #pragma omp parallel for private (i,j) for (i = 0; i <= 99; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= 99; j += 1) { a[i][j] = a[i][j] + 1; } } }
solution.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define Length 1.0
#define Temperature_1 1.0
#define Temperature_2 5.0

/*
 * Solves the 1-D heat equation on a rod of length `Length` with fixed
 * boundary temperatures, using the explicit four-point scheme and OpenMP.
 *
 *   argv[1]  time at which the temperature profile is wanted (default 1.0)
 *   argv[2]  number of coordinate intervals M                (default 10)
 *   argv[3]  number of threads                               (default 1)
 *
 * Prints "x T(x)" pairs to stdout.  Returns EXIT_FAILURE on bad arguments
 * or allocation failure.
 */
int main(int argc, char **argv)
{
    double Time = 1.0;   /* time at which to report the distribution */
    size_t M = 10;       /* number of coordinate points */
    size_t size = 1;     /* number of parallel threads */

    if (argc > 1) {
        Time = atof(argv[1]);
        if (Time < 0) {
            printf("Sorry, timemachine hasn't been invented yet!");
            return EXIT_FAILURE;
        }
        if (argc > 2) {
            M = atoll(argv[2]);
            if (M < 2) { /* the scheme needs at least two points */
                printf("Invalid values!\n");
                return EXIT_FAILURE;
            }
            if (argc > 3) {
                size = atoll(argv[3]);
                if (M <= size) {
                    /* more threads than grid points is pointless */
                    printf("Required number of processes is unreasonable \
compared to coordinate partition!\n");
                    return EXIT_FAILURE;
                }
            }
        }
    }

    double h = Length / M;            /* coordinate step */
    double tau = 0.3 * h * h;         /* time step (Courant number 0.3) */
    size_t N = (size_t)(Time / tau);  /* number of time steps */

    /* temperature arrays for time levels n and n+1 */
    double *u0 = malloc(sizeof(double) * M);
    double *u1 = malloc(sizeof(double) * M);

    /* Thread id owns interior points [left_index[id], left_index[id+1]).
     * BUG FIX: the original allocated `sizeof(size_t) * size + 1` — one extra
     * BYTE instead of one extra ELEMENT — so writing left_index[size] below
     * overflowed the heap block. */
    size_t *left_index = malloc(sizeof(size_t) * (size + 1));

    if (!u0 || !u1 || !left_index) {
        free(u0);
        free(u1);
        free(left_index);
        return EXIT_FAILURE;
    }

    size_t m, n;

    /* initial condition f(x) = 0, then fixed boundary temperatures */
    for (m = 0; m < M; m++)
        u0[m] = u1[m] = 0.0;
    u0[0] = u1[0] = Temperature_1;
    u0[M - 1] = u1[M - 1] = Temperature_2;

    /* Balanced split of the M-2 interior points over `size` threads.
     * BUG FIX: the original formula added `((i-1) < ((M % size) - 2))`, which
     * underflows in size_t whenever M % size < 2; every chunk then got an
     * extra point and left_index[i] could exceed M-1, causing out-of-bounds
     * reads/writes in the scheme below. */
    size_t interior = M - 2;
    left_index[0] = 1;
    left_index[size] = M - 1;
    for (size_t i = 1; i < size; i++) {
        size_t extra = (i < interior % size) ? i : interior % size;
        left_index[i] = 1 + (interior / size) * i + extra;
    }

    omp_set_num_threads((int)size);

#pragma omp parallel private(n, m)
    {
        size_t id = omp_get_thread_num();

        /* time-stepping loop */
        for (n = 0; n < N; n++) {
            /* Explicit four-point scheme.  Within one time step u0 is
             * read-only and each thread writes a disjoint slice of u1, so no
             * locking is needed.  (The original's lock-protected boundary
             * computations were unconditionally overwritten by this very
             * statement, so the locks never affected the result.) */
            for (m = left_index[id]; m < left_index[id + 1]; ++m)
                u1[m] = u0[m] + 0.3 * (u0[m - 1] - 2.0 * u0[m] + u0[m + 1]);

            /* BUG FIX: the original counted finished threads in a shared
             * `epoc` variable that every thread also reset to 0 at the top of
             * the iteration — a data race that could lose increments and
             * deadlock the busy-wait.  A barrier expresses the same intent
             * correctly. */
#pragma omp barrier
#pragma omp single
            {
                /* swap time levels; the implicit barrier at the end of
                 * `single` keeps the other threads from racing ahead */
                double *t = u0;
                u0 = u1;
                u1 = t;
            }
        }
    }

    /* BUG FIX: after the final swap the newest level lives in u0; the
     * original printed u1, i.e. the distribution one step behind. */
    for (m = 0; m < M; m++)
        printf("%lf %lf\n", m * h, u0[m]);

    free(u0);
    free(u1);
    free(left_index);
    return EXIT_SUCCESS;
}
GB_unop__bnot_int32_int32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated kernel — documentation only; code left byte-identical.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__bnot_int32_int32
// op(A') function:  GB_unop_tran__bnot_int32_int32

// C type:   int32_t
// A type:   int32_t
// cast:     int32_t cij = aij
// unaryop:  cij = ~(aij)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (bitwise complement)
#define GB_OP(z, x) \
    z = ~(x) ;

// casting
#define GB_CAST(z, aij) \
    int32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = aij ; \
    Cx [pC] = ~(z) ; \
}

// true if operator is the identity op with no typecasting
// (0 here: BNOT is not the identity, so the memcpy fast path is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BNOT || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = ~(Ax [p]) over anz entries, in parallel.  Ab (the bitmap,
// if A is in bitmap form) selects which positions hold entries.

GrB_Info GB_unop_apply__bnot_int32_int32
(
    int32_t *Cx,               // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                int32_t aij = Ax [p] ;
                int32_t z = aij ;
                Cx [p] = ~(z) ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip positions with no entry
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above.

GrB_Info GB_unop_tran__bnot_int32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__eq_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): generated kernel — documentation only; code left byte-identical.
// The operator is EQ on fp32: cij = (aij == bij) is an EXACT float compare,
// which is the defined semantics of GrB_EQ_FP32.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_fp32)
// A*D function (colscale):         GB (_AxD__eq_fp32)
// D*A function (rowscale):         GB (_DxB__eq_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_fp32)
// C=scalar+B                       GB (_bind1st__eq_fp32)
// C=scalar+B'                      GB (_bind1st_tran__eq_fp32)
// C=A+scalar                       GB (_bind2nd__eq_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__eq_fp32)

// C type:   bool
// A type:   float
// B,b type: float
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out for EQ: the accumulating ewise3 kernel only exists for
// arithmetic ops (see the comment below), so this variant is "(none)".

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Body compiled out (#if 0) for EQ: accumulation is not defined for this op;
// the stub still returns GrB_SUCCESS so the dispatch table stays uniform.

GrB_Info GB (_Cdense_accumB__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Body compiled out (#if 0) for EQ, as above.

GrB_Info GB (_Cdense_accumb__eq_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix workspaces, released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (EQ is commutative, so this branch is the one compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x == Bx [p]) for every entry present (per the Bb bitmap).

GrB_Info GB (_bind1st__eq_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] == y) for every entry present (per the Ab bitmap).

GrB_Info GB (_bind2nd__eq_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x == aij) ; \
}

GrB_Info GB (_bind1st_tran__eq_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij == y) ; \
}

GrB_Info GB (_bind2nd_tran__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
J1OrbitalSoA.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffOneBodyJastrowOrbital.h"
#include "Utilities/qmc_common.h"
#include "CPU/SIMD/aligned_allocator.hpp"
#include "CPU/SIMD/algorithm.hpp"
#include <map>
#include <numeric>

namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
 *  @brief Specialization for one-body Jastrow function using multiple functors
 *
 *  Stores, per electron iat, the accumulated value Vat[iat] = sum_j u(r_ij),
 *  plus the cached gradient Grad[iat] and laplacian Lap[iat], and updates
 *  them incrementally during particle-by-particle moves.
 */
template<class FT>
struct J1OrbitalSoA : public WaveFunctionComponent
{
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using DistRow  = DistanceTableData::DistRow;
  using DisplRow = DistanceTableData::DisplRow;
  ///table index
  const int myTableID;
  ///number of ions
  int Nions;
  ///number of electrons
  int Nelec;
  ///number of groups
  int NumGroups;
  ///reference to the sources (ions)
  const ParticleSet& Ions;

  // scratch for the particle being moved: trial value, laplacian, gradient
  valT curAt;
  valT curLap;
  posT curGrad;

  ///\f$Vat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Vat;
  // per-ion work arrays: u(r), u'(r)/r, u''(r), u'''(r)
  aligned_vector<valT> U, dU, d2U, d3U;
  aligned_vector<valT> DistCompressed;
  aligned_vector<int> DistIndice;
  Vector<posT> Grad;
  Vector<valT> Lap;
  ///Container for \f$F[ig*NumGroups+jg]\f$
  ///(owned raw pointers, one radial functor per ion species; may be null)
  std::vector<FT*> F;

  J1OrbitalSoA(const std::string& obj_name, const ParticleSet& ions, ParticleSet& els)
      : WaveFunctionComponent("J1OrbitalSoA", obj_name), myTableID(els.addTable(ions)), Ions(ions)
  {
    if (myName.empty())
      throw std::runtime_error("J1OrbitalSoA object name cannot be empty!");
    initialize(els);
  }

  J1OrbitalSoA(const J1OrbitalSoA& rhs) = delete;

  ~J1OrbitalSoA()
  {
    // F owns its functors (see addFunc/makeClone)
    for (int i = 0; i < F.size(); ++i)
      if (F[i] != nullptr)
        delete F[i];
  }

  /* initialize storage */
  void initialize(const ParticleSet& els)
  {
    Nions     = Ions.getTotalNum();
    NumGroups = Ions.getSpeciesSet().getTotalNum();
    // NOTE(review): minimum capacity of 4 — presumably a SIMD/padding
    // convention; confirm before changing.
    F.resize(std::max(NumGroups, 4), nullptr);
    if (NumGroups > 1 && !Ions.IsGrouped)
    {
      // ungrouped ions: fall back to the per-ion (GroupID) code paths
      NumGroups = 0;
    }
    Nelec = els.getTotalNum();
    Vat.resize(Nelec);
    Grad.resize(Nelec);
    Lap.resize(Nelec);

    U.resize(Nions);
    dU.resize(Nions);
    d2U.resize(Nions);
    d3U.resize(Nions);
    DistCompressed.resize(Nions);
    DistIndice.resize(Nions);
  }

  /** attach the radial functor for one ion species; takes ownership of afunc */
  void addFunc(int source_type, FT* afunc, int target_type = -1)
  {
    if (F[source_type] != nullptr)
      delete F[source_type];
    F[source_type] = afunc;
  }

  /** rebuild Vat/Grad/Lap for all electrons from scratch */
  void recompute(ParticleSet& P)
  {
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    for (int iat = 0; iat < Nelec; ++iat)
    {
      computeU3(P, iat, d_ie.getDistRow(iat));
      Vat[iat] = simd::accumulate_n(U.data(), Nions, valT());
      Lap[iat] = accumulateGL(dU.data(), d2U.data(), d_ie.getDisplRow(iat), Grad[iat]);
    }
  }

  /** evaluate log(psi) and accumulate per-particle gradients/laplacians */
  LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L)
  {
    evaluateGL(P, G, L, true);
    return LogValue;
  }

  /** accumulate the Hessian of log(psi) for each electron */
  void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
  {
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    valT dudr, d2udr2;

    Tensor<valT, DIM> ident;
    grad_grad_psi = 0.0;
    ident.diagonal(1.0);

    for (int iel = 0; iel < Nelec; ++iel)
    {
      const auto& dist  = d_ie.getDistRow(iel);
      const auto& displ = d_ie.getDisplRow(iel);
      for (int iat = 0; iat < Nions; iat++)
      {
        int gid    = Ions.GroupID[iat];
        auto* func = F[gid];
        if (func != nullptr)
        {
          RealType r    = dist[iat];
          RealType rinv = 1.0 / r;
          PosType dr    = displ[iat];
          func->evaluate(r, dudr, d2udr2);
          // radial + angular decomposition of the Hessian of -u(r)
          grad_grad_psi[iel] -= rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
        }
      }
    }
  }

  /** ratio psi(new)/psi(old) for the proposed move of particle iat */
  PsiValueType ratio(ParticleSet& P, int iat)
  {
    UpdateMode = ORB_PBYP_RATIO;
    curAt      = computeU(P.getDistTable(myTableID).getTempDists());
    return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
  }

  /** ratios for all virtual-particle positions of VP */
  inline void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    for (int k = 0; k < ratios.size(); ++k)
      ratios[k] = std::exp(Vat[VP.refPtcl] - computeU(VP.getDistTable(myTableID).getDistRow(k)));
  }

  /** sum u(r) over all ions for one electron's distance row */
  inline valT computeU(const DistRow& dist)
  {
    valT curVat(0);
    if (NumGroups > 0)
    {
      // grouped ions: vectorized per-species evaluation
      for (int jg = 0; jg < NumGroups; ++jg)
      {
        if (F[jg] != nullptr)
          curVat += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist.data(), DistCompressed.data());
      }
    }
    else
    {
      // ungrouped: one functor lookup per ion
      for (int c = 0; c < Nions; ++c)
      {
        int gid = Ions.GroupID[c];
        if (F[gid] != nullptr)
          curVat += F[gid]->evaluate(dist[c]);
      }
    }
    return curVat;
  }

  /** ratios of moving EACH electron to the single temporary position */
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
  {
    const auto& dist = P.getDistTable(myTableID).getTempDists();
    curAt            = valT(0);
    if (NumGroups > 0)
    {
      for (int jg = 0; jg < NumGroups; ++jg)
      {
        if (F[jg] != nullptr)
          curAt += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist.data(), DistCompressed.data());
      }
    }
    else
    {
      for (int c = 0; c < Nions; ++c)
      {
        int gid = Ions.GroupID[c];
        if (F[gid] != nullptr)
          curAt += F[gid]->evaluate(dist[c]);
      }
    }

    for (int i = 0; i < Nelec; ++i)
      ratios[i] = std::exp(Vat[i] - curAt);
  }

  /** add cached Grad/Lap into G and L; recompute first if fromscratch */
  inline void evaluateGL(ParticleSet& P,
                         ParticleSet::ParticleGradient_t& G,
                         ParticleSet::ParticleLaplacian_t& L,
                         bool fromscratch = false)
  {
    if (fromscratch)
      recompute(P);

    for (size_t iat = 0; iat < Nelec; ++iat)
      G[iat] += Grad[iat];
    for (size_t iat = 0; iat < Nelec; ++iat)
      L[iat] -= Lap[iat];
    LogValue = -simd::accumulate_n(Vat.data(), Nelec, valT());
  }

  /** compute gradient and lap
   * @return lap
   *
   * du is expected to hold u'(r)/r (see computeU3), so the laplacian in
   * spherical form is sum_j [ u''(r_j) + (DIM-1) u'(r_j)/r_j ].
   */
  inline valT accumulateGL(const valT* restrict du,
                           const valT* restrict d2u,
                           const DisplRow& displ,
                           posT& grad) const
  {
    valT lap(0);
    constexpr valT lapfac = OHMMS_DIM - RealType(1);
    //#pragma omp simd reduction(+:lap)
    for (int jat = 0; jat < Nions; ++jat)
      lap += d2u[jat] + lapfac * du[jat];
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      const valT* restrict dX = displ.data(idim);
      valT s                  = valT();
      //#pragma omp simd reduction(+:s)
      for (int jat = 0; jat < Nions; ++jat)
        s += du[jat] * dX[jat];
      grad[idim] = s;
    }
    return lap;
  }

  /** compute U, dU and d2U
   * @param P quantum particleset
   * @param iat the moving particle
   * @param dist starting address of the distances of the ions wrt the iat-th particle
   *
   * On exit dU holds u'(r)/r (note the division below in the ungrouped path;
   * the grouped evaluateVGL applies the same convention).
   */
  inline void computeU3(ParticleSet& P, int iat, const DistRow& dist)
  {
    if (NumGroups > 0)
    { //ions are grouped
      constexpr valT czero(0);
      std::fill_n(U.data(), Nions, czero);
      std::fill_n(dU.data(), Nions, czero);
      std::fill_n(d2U.data(), Nions, czero);

      for (int jg = 0; jg < NumGroups; ++jg)
      {
        if (F[jg] == nullptr)
          continue;
        F[jg]->evaluateVGL(-1, Ions.first(jg), Ions.last(jg), dist.data(), U.data(), dU.data(), d2U.data(),
                           DistCompressed.data(), DistIndice.data());
      }
    }
    else
    {
      for (int c = 0; c < Nions; ++c)
      {
        int gid = Ions.GroupID[c];
        if (F[gid] != nullptr)
        {
          U[c] = F[gid]->evaluate(dist[c], dU[c], d2U[c]);
          dU[c] /= dist[c];
        }
      }
    }
  }

  /** compute the gradient during particle-by-particle update
   * @param P quantum particleset
   * @param iat particle index
   */
  GradType evalGrad(ParticleSet& P, int iat) { return GradType(Grad[iat]); }

  /** compute the gradient during particle-by-particle update
   * @param P quantum particleset
   * @param iat particle index
   *
   * Using getTempDists(). curAt, curGrad and curLap are computed.
   */
  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
  {
    UpdateMode = ORB_PBYP_PARTIAL;

    computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
    curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
    curAt  = simd::accumulate_n(U.data(), Nions, valT());
    grad_iat += curGrad;
    return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
  }

  /** Rejected move. Nothing to do */
  inline void restore(int iat) {}

  /** Accpted move. Update Vat[iat],Grad[iat] and Lap[iat] */
  void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false)
  {
    // after a ratio()-only move curGrad/curLap were never computed; do it now
    if (UpdateMode == ORB_PBYP_RATIO)
    {
      computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
      curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
    }

    LogValue += Vat[iat] - curAt;
    Vat[iat]  = curAt;
    Grad[iat] = curGrad;
    Lap[iat]  = curLap;
  }

  /** register Vat/Grad/Lap in the walker buffer; afterwards the vectors
   *  alias buffer storage (see copyFromBuffer) */
  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      Bytes_in_WFBuffer = buf.current();
      buf.add(Vat.begin(), Vat.end());
      buf.add(Grad.begin(), Grad.end());
      buf.add(Lap.begin(), Lap.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
      // free local space
      Vat.free();
      Grad.free();
      Lap.free();
    }
    else
    {
      buf.forward(Bytes_in_WFBuffer);
    }
  }

  inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
  {
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }

  /** re-attach Vat/Grad/Lap to the walker buffer storage */
  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    Vat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
    Grad.attachReference(buf.lendReference<posT>(Nelec), Nelec);
    Lap.attachReference(buf.lendReference<valT>(Nelec), Nelec);
  }

  /** deep-copy this component (clones each radial functor) */
  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const
  {
    J1OrbitalSoA<FT>* j1copy = new J1OrbitalSoA<FT>(myName, Ions, tqp);
    j1copy->Optimizable      = Optimizable;
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
        j1copy->addFunc(i, new FT(*F[i]));
    }
    if (dPsi)
    {
      j1copy->dPsi = dPsi->makeClone(tqp);
    }
    return j1copy;
  }

  /**@{ WaveFunctionComponent virtual functions that are not essential for the development */
  void reportStatus(std::ostream& os)
  {
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
        F[i]->myVars.print(os);
    }
  }

  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
      {
        F[i]->checkInVariables(active);
        F[i]->checkInVariables(myVars);
      }
    }
  }

  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    for (size_t i = 0, n = F.size(); i < n; ++i)
      if (F[i] != nullptr)
        F[i]->checkOutVariables(active);
    if (dPsi)
      dPsi->checkOutVariables(active);
  }

  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    for (size_t i = 0, n = F.size(); i < n; ++i)
      if (F[i] != nullptr)
        F[i]->resetParameters(active);

    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
    if (dPsi)
      dPsi->resetParameters(active);
  }
  /**@} */

  /** gradient of log(psi) with respect to the position of source ion isrc */
  inline GradType evalGradSource(ParticleSet& P, ParticleSet& source, int isrc)
  {
    GradType g_return(0.0);
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    for (int iat = 0; iat < Nelec; ++iat)
    {
      const auto& dist  = d_ie.getDistRow(iat);
      const auto& displ = d_ie.getDisplRow(iat);
      int gid           = source.GroupID[isrc];
      RealType r        = dist[isrc];
      RealType rinv     = 1.0 / r;
      PosType dr        = displ[isrc];

      if (F[gid] != nullptr)
      {
        U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
        g_return -= dU[isrc] * rinv * dr;
      }
    }
    return g_return;
  }

  /** source-ion gradient plus its electron-position derivatives (for forces) */
  inline GradType evalGradSource(ParticleSet& P,
                                 ParticleSet& source,
                                 int isrc,
                                 TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
                                 TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
  {
    GradType g_return(0.0);
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    for (int iat = 0; iat < Nelec; ++iat)
    {
      const auto& dist  = d_ie.getDistRow(iat);
      const auto& displ = d_ie.getDisplRow(iat);
      int gid           = source.GroupID[isrc];
      RealType r        = dist[isrc];
      RealType rinv     = 1.0 / r;
      PosType dr        = displ[isrc];

      if (F[gid] != nullptr)
      {
        U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
      }
      else
      {
        APP_ABORT("J1OrbitalSoa::evaluateGradSource: F[gid]==nullptr")
      }

      g_return -= dU[isrc] * rinv * dr;

      //The following terms depend only on the radial component r.  Thus,
      //we compute them and mix with position vectors to acquire the full
      //cartesian vector objects.
      valT grad_component = (d2U[isrc] - dU[isrc] * rinv);
      valT lapl_component = d3U[isrc] + 2 * rinv * grad_component;

      for (int idim = 0; idim < OHMMS_DIM; idim++)
      {
        grad_grad[idim][iat] += dr[idim] * dr * rinv * rinv * grad_component;
        grad_grad[idim][iat][idim] += rinv * dU[isrc];

        lapl_grad[idim][iat] -= lapl_component * rinv * dr[idim];
      }
    }
    return g_return;
  }
};

} // namespace qmcplusplus
#endif
convolutiondepthwise_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// Depthwise 3x3 convolution, stride 1.
// One group == one channel: channel g of bottom_blob is convolved with the
// g-th 3x3 kernel (9 floats at kernel + g*9) and written to channel g of
// top_blob, plus an optional per-channel bias.
// The NEON fast path emits 4 output columns per asm-loop iteration and, in
// this stride-1 variant, two output rows per pass over input rows r0..r3;
// leftover columns (outw % 4) and a trailing odd output row fall back to the
// shorter vector/scalar code after the asm blocks.
// NOTE(review): padding/edge handling is assumed to be done by the caller
// (bottom_blob must be at least (outw+2) x (outh+2) per channel) — confirm
// against the calling layer.
static void convdw3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Channels are independent in a depthwise convolution, so parallelize over groups.
    #pragma omp parallel for
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g*9;

        float* outptr = out;
        float* outptr2 = outptr + outw; // second output row (two rows per pass)

        const float* img0 = bottom_blob.channel(g);

        // Four consecutive input rows feed the two output rows of one pass.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;
        const float* r3 = img0 + w*3;

#if __ARM_NEON
        // One q-register per kernel row; lane 3 is forced to zero so the
        // 4-lane multiplies in the remainder path only accumulate 3 real taps.
        float32x4_t _k012x = vld1q_f32(kernel0);
        float32x4_t _k345x = vld1q_f32(kernel0+3);
        float32x4_t _k678x = vld1q_f32(kernel0+6);

        _k012x = vsetq_lane_f32(0.f, _k012x, 3);
        _k345x = vsetq_lane_f32(0.f, _k345x, 3);
        _k678x = vsetq_lane_f32(0.f, _k678x, 3);

        float32x4_t _bias0 = vdupq_n_f32(bias0);
#else
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;
#endif // __ARM_NEON

        int i = 0;

        // Main section: two output rows per iteration.
        for (; i+1 < outh; i+=2)
        {

#if __ARM_NEON
            int nn = outw >> 2;    // number of 4-wide vector iterations
            int remain = outw & 3; // leftover columns handled below
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
            // v7/v6/v13 accumulate output row 1, v8/v14/v15 output row 2;
            // each input row is loaded once and reused for both output rows.
            asm volatile(
                "prfm       pldl1keep, [%3, #192]       \n"
                "ld1        {v9.4s, v10.4s}, [%3]       \n" //r0
                "add        %3, %3, #16                 \n"
                "ext        v11.16b, v9.16b, v10.16b, #4 \n"
                "ext        v12.16b, v9.16b, v10.16b, #8 \n"

                "0:                                     \n"
                "fmul       v7.4s, v9.4s, %14.s[0]      \n"
                "and        v13.16b, %17.16b, %17.16b   \n" // v13 = _bias0
                "fmul       v6.4s, v11.4s, %14.s[1]     \n"
                "fmla       v13.4s, v12.4s, %14.s[2]    \n"

                "prfm       pldl1keep, [%4, #192]       \n"
                "ld1        {v9.4s, v10.4s}, [%4]       \n"
                "add        %4, %4, #16                 \n"

                "fmla       v7.4s, v9.4s, %15.s[0]      \n"
                "ext        v11.16b, v9.16b, v10.16b, #4 \n"
                "ext        v12.16b, v9.16b, v10.16b, #8 \n"
                "fmla       v6.4s, v11.4s, %15.s[1]     \n"
                "fmla       v13.4s, v12.4s, %15.s[2]    \n"

                "fmul       v8.4s, v9.4s, %14.s[0]      \n"
                "and        v15.16b, %17.16b, %17.16b   \n" // v15 = _bias0
                "fmul       v14.4s, v11.4s, %14.s[1]    \n"
                "fmla       v15.4s, v12.4s, %14.s[2]    \n"

                "prfm       pldl1keep, [%5, #192]       \n"
                "ld1        {v9.4s, v10.4s}, [%5]       \n"
                "add        %5, %5, #16                 \n"

                "fmla       v7.4s, v9.4s, %16.s[0]      \n"
                "ext        v11.16b, v9.16b, v10.16b, #4 \n"
                "ext        v12.16b, v9.16b, v10.16b, #8 \n"
                "fmla       v6.4s, v11.4s, %16.s[1]     \n"
                "fmla       v13.4s, v12.4s, %16.s[2]    \n"

                "fmla       v8.4s, v9.4s, %15.s[0]      \n"
                "fmla       v14.4s, v11.4s, %15.s[1]    \n"
                "fmla       v15.4s, v12.4s, %15.s[2]    \n"

                "prfm       pldl1keep, [%6, #192]       \n"
                "ld1        {v9.4s, v10.4s}, [%6]       \n"
                "add        %6, %6, #16                 \n"

                "fmla       v8.4s, v9.4s, %16.s[0]      \n"
                "ext        v11.16b, v9.16b, v10.16b, #4 \n"
                "ext        v12.16b, v9.16b, v10.16b, #8 \n"
                "fmla       v14.4s, v11.4s, %16.s[1]    \n"
                "fmla       v15.4s, v12.4s, %16.s[2]    \n"

                "fadd       v7.4s, v7.4s, v6.4s         \n"

                "prfm       pldl1keep, [%3, #192]       \n"
                "ld1        {v9.4s, v10.4s}, [%3]       \n" //ro, for next loop
                "fadd       v8.4s, v8.4s, v14.4s        \n"
                "fadd       v7.4s, v7.4s, v13.4s        \n"
                "fadd       v8.4s, v8.4s, v15.4s        \n"

                "ext        v11.16b, v9.16b, v10.16b, #4 \n" // for next loop
                "ext        v12.16b, v9.16b, v10.16b, #8 \n" // for next loop
                "add        %3, %3, #16                 \n"

                "st1        {v7.4s}, [%1], #16          \n"
                "st1        {v8.4s}, [%2], #16          \n"

                "subs       %w0, %w0, #1                \n"
                "bne        0b                          \n"
                "sub        %3, %3, #16                 \n"
                : "=r"(nn),      // %0
                  "=r"(outptr),  // %1
                  "=r"(outptr2), // %2
                  "=r"(r0),      // %3
                  "=r"(r1),      // %4
                  "=r"(r2),      // %5
                  "=r"(r3)       // %6
                : "0"(nn),
                  "1"(outptr),
                  "2"(outptr2),
                  "3"(r0),
                  "4"(r1),
                  "5"(r2),
                  "6"(r3),
                  "w"(_k012x),   // %14
                  "w"(_k345x),   // %15
                  "w"(_k678x),   // %16
                  "w"(_bias0)    // %17
                : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
            );
            }
#else
            if (nn > 0)
            {
            // armv7 twin of the aarch64 loop above; q7/q6/q13 accumulate
            // output row 1, q8/q14/q15 output row 2.
            asm volatile(
                "pld        [%3, #192]          \n"
                "vld1.f32   {d18-d20}, [%3 :64] \n"// r0
                "add        %3, #16             \n"

                "vext.32    q11, q9, q10, #1    \n"
                "vext.32    q12, q9, q10, #2    \n"

                "0:                             \n"

                "vmul.f32   q7, q9, %e14[0]     \n"
                "vand       q13, %q17, %q17     \n"// q13 = _bias0
                "vmul.f32   q6, q11, %e14[1]    \n"
                "vmla.f32   q13, q12, %f14[0]   \n"

                "pld        [%4, #192]          \n"
                "vld1.f32   {d18-d20}, [%4]     \n"// r1
                "add        %4, #16             \n"

                "vmla.f32   q7, q9, %e15[0]     \n"

                "vext.32    q11, q9, q10, #1    \n"
                "vext.32    q12, q9, q10, #2    \n"

                "vmla.f32   q6, q11, %e15[1]    \n"
                "vmla.f32   q13, q12, %f15[0]   \n"

                "vmul.f32   q8, q9, %e14[0]     \n"
                "vand       q15, %q17, %q17     \n"// q15 = _bias0
                "vmul.f32   q14, q11, %e14[1]   \n"
                "vmla.f32   q15, q12, %f14[0]   \n"

                "pld        [%5, #192]          \n"
                "vld1.f32   {d18-d20}, [%5 :64] \n"// r2
                "add        %5, #16             \n"

                "vmla.f32   q7, q9, %e16[0]     \n"

                "vext.32    q11, q9, q10, #1    \n"
                "vext.32    q12, q9, q10, #2    \n"

                "vmla.f32   q6, q11, %e16[1]    \n"
                "vmla.f32   q13, q12, %f16[0]   \n"

                "vmla.f32   q8, q9, %e15[0]     \n"
                "vmla.f32   q14, q11, %e15[1]   \n"
                "vmla.f32   q15, q12, %f15[0]   \n"

                "pld        [%6, #192]          \n"
                "vld1.f32   {d18-d20}, [%6]     \n"// r3
                "add        %6, #16             \n"

                "vmla.f32   q8, q9, %e16[0]     \n"

                "vext.32    q11, q9, q10, #1    \n"
                "vext.32    q12, q9, q10, #2    \n"

                "vmla.f32   q14, q11, %e16[1]   \n"
                "vmla.f32   q15, q12, %f16[0]   \n"

                "vadd.f32   q7, q7, q6          \n"

                "pld        [%3, #192]          \n"
                "vld1.f32   {d18-d20}, [%3 :64] \n"// r0

                "vadd.f32   q8, q8, q14         \n"
                "vadd.f32   q7, q7, q13         \n"
                "vadd.f32   q8, q8, q15         \n"

                "vext.32    q11, q9, q10, #1    \n"
                "vext.32    q12, q9, q10, #2    \n"

                "add        %3, #16             \n"

                "vst1.f32   {d14-d15}, [%1]!    \n"
                "vst1.f32   {d16-d17}, [%2]!    \n"

                "subs       %0, #1              \n"
                "bne        0b                  \n"

                "sub        %3, #16             \n"
                : "=r"(nn),      // %0
                  "=r"(outptr),  // %1
                  "=r"(outptr2), // %2
                  "=r"(r0),      // %3
                  "=r"(r1),      // %4
                  "=r"(r2),      // %5
                  "=r"(r3)       // %6
                : "0"(nn),
                  "1"(outptr),
                  "2"(outptr2),
                  "3"(r0),
                  "4"(r1),
                  "5"(r2),
                  "6"(r3),
                  "w"(_k012x),   // %14
                  "w"(_k345x),   // %15
                  "w"(_k678x),   // %16
                  "w"(_bias0)    // %17
                : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Column remainder: one output column (for both rows) per iteration.
            // The NEON variant relies on lane 3 of _k012x/_k345x/_k678x being
            // zero, then overwrites lane 3 of the partial sum with the bias so
            // the horizontal add yields bias + 3-tap dot products.
            for (; remain>0; remain--)
            {
#if __ARM_NEON
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r30 = vld1q_f32(r3);

                float32x4_t _sum = vmulq_f32(_r00, _k012x);
                _sum = vmlaq_f32(_sum, _r10, _k345x);
                _sum = vmlaq_f32(_sum, _r20, _k678x);

                float32x4_t _sum2 = vmulq_f32(_r10, _k012x);
                _sum2 = vmlaq_f32(_sum2, _r20, _k345x);
                _sum2 = vmlaq_f32(_sum2, _r30, _k678x);

                _sum = vsetq_lane_f32(bias0, _sum, 3);
                _sum2 = vsetq_lane_f32(bias0, _sum2, 3);
#if __aarch64__
                *outptr = vaddvq_f32(_sum);
                *outptr2 = vaddvq_f32(_sum2);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));

                float32x2_t _sss2 = vpadd_f32(_ss, _ss2);

                *outptr = vget_lane_f32(_sss2, 0);
                *outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];

                *outptr = sum;
                *outptr2 = sum2;
#endif
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }

            // Skip the 2-pixel horizontal apron plus one full row: each pass
            // consumed two output rows, so input rows advance by 2.
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            outptr += outw;
            outptr2 += outw;
        }

        // Tail section: a single remaining output row (outh odd).
        for (; i < outh; i++)
        {

#if __ARM_NEON
            int nn = outw >> 2;
            int remain = outw & 3;
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
            // Single-output-row loop: v7/v13/v14 accumulate 4 outputs.
            asm volatile(
                "prfm       pldl1keep, [%2, #192]       \n"
                "ld1        {v8.4s, v9.4s}, [%2]        \n" //r0
                "add        %2, %2, #16                 \n"

                "ext        v10.16b, v8.16b, v9.16b, #4 \n"
                "ext        v11.16b, v8.16b, v9.16b, #8 \n"

                "0:                                     \n"

                "fmul       v7.4s, v8.4s, %10.s[0]      \n"
                "and        v14.16b, %13.16b, %13.16b   \n" // v14 = _bias0
                "fmul       v13.4s, v10.4s, %10.s[1]    \n"
                "fmla       v14.4s, v11.4s, %10.s[2]    \n"

                "prfm       pldl1keep, [%3, #192]       \n"
                "ld1        {v8.4s, v9.4s}, [%3]        \n" //r1
                "add        %3, %3, #16                 \n"

                "fmla       v7.4s, v8.4s, %11.s[0]      \n"

                "ext        v10.16b, v8.16b, v9.16b, #4 \n"
                "ext        v11.16b, v8.16b, v9.16b, #8 \n"

                "fmla       v13.4s, v10.4s, %11.s[1]    \n"
                "fmla       v14.4s, v11.4s, %11.s[2]    \n"

                "prfm       pldl1keep, [%4, #192]       \n"
                "ld1        {v8.4s, v9.4s}, [%4]        \n" //r2
                "add        %4, %4, #16                 \n"

                "fmla       v7.4s, v8.4s, %12.s[0]      \n"

                "ext        v10.16b, v8.16b, v9.16b, #4 \n"
                "ext        v11.16b, v8.16b, v9.16b, #8 \n"

                "fmla       v13.4s, v10.4s, %12.s[1]    \n"
                "fmla       v14.4s, v11.4s, %12.s[2]    \n"

                "prfm       pldl1keep, [%2, #192]       \n"
                "ld1        {v8.4s, v9.4s}, [%2]        \n" //r0, for next loop
                "add        %2, %2, #16                 \n"

                "fadd       v7.4s, v7.4s, v13.4s        \n"
                "fadd       v7.4s, v7.4s, v14.4s        \n"

                "ext        v10.16b, v8.16b, v9.16b, #4 \n" // for next loop
                "ext        v11.16b, v8.16b, v9.16b, #8 \n" // for next loop

                "st1        {v7.4s}, [%1], #16          \n"

                "subs       %w0, %w0, #1                \n"
                "bne        0b                          \n"

                "sub        %2, %2, #16                 \n"
                : "=r"(nn),     // %0
                  "=r"(outptr), // %1
                  "=r"(r0),     // %2
                  "=r"(r1),     // %3
                  "=r"(r2)      // %4
                : "0"(nn),
                  "1"(outptr),
                  "2"(r0),
                  "3"(r1),
                  "4"(r2),
                  "w"(_k012x),  // %10
                  "w"(_k345x),  // %11
                  "w"(_k678x),  // %12
                  "w"(_bias0)   // %13
                : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
            );
            }
#else
            if (nn > 0)
            {
            // armv7 twin of the single-row loop above.
            asm volatile(
                "pld        [%2, #192]      \n"
                "vld1.f32   {d16-d18}, [%2] \n"// r0
                "add        %2, #16         \n"

                "vext.32    q10, q8, q9, #1 \n"
                "vext.32    q11, q8, q9, #2 \n"

                "0:                         \n"

                "vmul.f32   q7, q8, %e10[0] \n"
                "vand       q14, %q13, %q13 \n"// q14 = _bias0
                "vmul.f32   q13, q10, %e10[1] \n"
                "vmla.f32   q14, q11, %f10[0] \n"

                "pld        [%3, #192]      \n"
                "vld1.f32   {d16-d18}, [%3] \n"// r1
                "add        %3, #16         \n"

                "vmla.f32   q7, q8, %e11[0] \n"

                "vext.32    q10, q8, q9, #1 \n"
                "vext.32    q11, q8, q9, #2 \n"

                "vmla.f32   q13, q10, %e11[1] \n"
                "vmla.f32   q14, q11, %f11[0] \n"

                "pld        [%4, #192]      \n"
                "vld1.f32   {d16-d18}, [%4] \n"// r2
                "add        %4, #16         \n"

                "vmla.f32   q7, q8, %e12[0] \n"

                "vext.32    q10, q8, q9, #1 \n"
                "vext.32    q11, q8, q9, #2 \n"

                "vmla.f32   q13, q10, %e12[1] \n"
                "vmla.f32   q14, q11, %f12[0] \n"

                "pld        [%2, #192]      \n"
                "vld1.f32   {d16-d18}, [%2] \n"// r0
                "add        %2, #16         \n"

                "vadd.f32   q7, q7, q13     \n"
                "vadd.f32   q7, q7, q14     \n"

                "vext.32    q10, q8, q9, #1 \n"
                "vext.32    q11, q8, q9, #2 \n"

                "vst1.f32   {d14-d15}, [%1]! \n"

                "subs       %0, #1          \n"
                "bne        0b              \n"

                "sub        %2, #16         \n"
                : "=r"(nn),     // %0
                  "=r"(outptr), // %1
                  "=r"(r0),     // %2
                  "=r"(r1),     // %3
                  "=r"(r2)      // %4
                : "0"(nn),
                  "1"(outptr),
                  "2"(r0),
                  "3"(r1),
                  "4"(r2),
                  "w"(_k012x),  // %10
                  "w"(_k345x),  // %11
                  "w"(_k678x),  // %12
                  "w"(_bias0)   // %13
                : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Column remainder for the single-row tail.
            for (; remain>0; remain--)
            {
#if __ARM_NEON
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);

                float32x4_t _sum = vmulq_f32(_r00, _k012x);
                _sum = vmlaq_f32(_sum, _r10, _k345x);
                _sum = vmlaq_f32(_sum, _r20, _k678x);

                _sum = vsetq_lane_f32(bias0, _sum, 3);
#if __aarch64__
                *outptr = vaddvq_f32(_sum);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                _ss = vpadd_f32(_ss, _ss);

                *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;
#endif
                r0++;
                r1++;
                r2++;
                outptr++;
            }

            // Only the 2-pixel horizontal apron here: one output row consumed.
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

// Depthwise 3x3 convolution, stride 2.
// Same per-group layout as the stride-1 variant, but one output row per pass
// and strided loads: the NEON path uses ld2/vld2 to de-interleave even/odd
// input columns so 4 outputs are computed from 8 input pixels per row.
static void convdw3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // After outw outputs the row pointers have advanced 2*outw; tailstep
    // (== 2*w - 2*outw) lands them on the row two input rows down.
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g*9;

        float* outptr = out;

        const float* img0 = bottom_blob.channel(g);

        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;

#if __ARM_NEON
        // Kernel rows in q-registers with lane 3 zeroed, as in the s1 variant.
        float32x4_t _k012x = vld1q_f32(kernel0);
        float32x4_t _k345x = vld1q_f32(kernel0+3);
        float32x4_t _k678x = vld1q_f32(kernel0+6);

        _k012x = vsetq_lane_f32(0.f, _k012x, 3);
        _k345x = vsetq_lane_f32(0.f, _k345x, 3);
        _k678x = vsetq_lane_f32(0.f, _k678x, 3);

        float32x4_t _bias0 = vdupq_n_f32(bias0);
#else
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;
#endif // __ARM_NEON

        int i = 0;

        for (; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;
            int remain = outw & 3;
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
            // ld2 splits each row into even columns (v2) and odd columns (v3);
            // the third tap comes from ext-ing in the next even column.
            asm volatile(
                "prfm       pldl1keep, [%2, #256]       \n"
                "ld2        {v2.4s, v3.4s}, [%2], #32   \n"
                "and        v11.16b, %13.16b, %13.16b   \n" // v11 = _bias0

                "0:                                     \n"
                "fmul       v0.4s, v2.4s, %10.s[0]      \n"
                "fmul       v10.4s, v3.4s, %10.s[1]     \n"

                "prfm       pldl1keep, [%2, #256]       \n"
                "ld2        {v8.4s, v9.4s}, [%2]        \n"
                "ext        v1.16b, v2.16b, v8.16b, #4  \n"

                "fmla       v11.4s, v1.4s, %10.s[2]     \n"

                "prfm       pldl1keep, [%3, #256]       \n"
                "ld2        {v2.4s, v3.4s}, [%3], #32   \n"

                "fmla       v0.4s, v2.4s, %11.s[0]      \n"
                "fmla       v10.4s, v3.4s, %11.s[1]     \n"

                "prfm       pldl1keep, [%3, #256]       \n"
                "ld2        {v8.4s, v9.4s}, [%3]        \n"
                "ext        v1.16b, v2.16b, v8.16b, #4  \n"

                "fmla       v11.4s, v1.4s, %11.s[2]     \n"

                "prfm       pldl1keep, [%4, #256]       \n"
                "ld2        {v2.4s, v3.4s}, [%4], #32   \n"

                "fmla       v0.4s, v2.4s, %12.s[0]      \n"
                "fmla       v10.4s, v3.4s, %12.s[1]     \n"

                "prfm       pldl1keep, [%4, #256]       \n"
                "ld2        {v8.4s, v9.4s}, [%4]        \n"
                "ext        v1.16b, v2.16b, v8.16b, #4  \n"

                "fmla       v11.4s, v1.4s, %12.s[2]     \n"

                "prfm       pldl1keep, [%2, #256]       \n"
                "ld2        {v2.4s, v3.4s}, [%2], #32   \n"

                "fadd       v0.4s, v0.4s, v10.4s        \n"
                "fadd       v0.4s, v0.4s, v11.4s        \n"

                "and        v11.16b, %13.16b, %13.16b   \n" // v11 = _bias0

                "subs       %w0, %w0, #1                \n"
                "st1        {v0.4s}, [%1], #16          \n"
                "bne        0b                          \n"
                "sub        %2, %2, #32                 \n"
                : "=r"(nn),     // %0
                  "=r"(outptr), // %1
                  "=r"(r0),     // %2
                  "=r"(r1),     // %3
                  "=r"(r2)      // %4
                : "0"(nn),
                  "1"(outptr),
                  "2"(r0),
                  "3"(r1),
                  "4"(r2),
                  "w"(_k012x),  // %10
                  "w"(_k345x),  // %11
                  "w"(_k678x),  // %12
                  "w"(_bias0)   // %13
                : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
            );
            }
#else
            if (nn > 0)
            {
            // armv7 twin using vld2 de-interleaving.
            asm volatile(
                "pld        [%2, #256]      \n"
                "vld2.f32   {d4-d7}, [%2]!  \n"
                "vand       q11, %q13, %q13 \n"

                "0:                         \n"
                "vmul.f32   q0, q2, %e10[0] \n"
                "vmul.f32   q10, q3, %e10[1] \n"

                "pld        [%2, #128]      \n"
                "vld2.f32   {d16-d17}, [%2] \n"
                "vext.32    q1, q2, q8, #1  \n"

                "vmla.f32   q11, q1, %f10[0] \n"

                "pld        [%3, #256]      \n"
                "vld2.f32   {d4-d7}, [%3]!  \n"

                "vmla.f32   q0, q2, %e11[0] \n"
                "vmla.f32   q10, q3, %e11[1] \n"

                "pld        [%3, #128]      \n"
                "vld2.f32   {d16-d17}, [%3] \n"
                "vext.32    q1, q2, q8, #1  \n"

                "vmla.f32   q11, q1, %f11[0] \n"

                "pld        [%4, #256]      \n"
                "vld2.f32   {d4-d7}, [%4]!  \n"

                "vmla.f32   q0, q2, %e12[0] \n"
                "vmla.f32   q10, q3, %e12[1] \n"

                "pld        [%4, #128]      \n"
                "vld2.f32   {d16-d17}, [%4] \n"
                "vext.32    q1, q2, q8, #1  \n"

                "vmla.f32   q11, q1, %f12[0] \n"

                "pld        [%2, #256]      \n"
                "vld2.f32   {d4-d7}, [%2]!  \n"

                "vadd.f32   q0, q0, q10     \n"
                "vadd.f32   q0, q0, q11     \n"

                "vand       q11, %q13, %q13 \n"

                "subs       %0, #1          \n"
                "vst1.f32   {d0-d1}, [%1]!  \n"
                "bne        0b              \n"
                "sub        %2, #32         \n"
                : "=r"(nn),     // %0
                  "=r"(outptr), // %1
                  "=r"(r0),     // %2
                  "=r"(r1),     // %3
                  "=r"(r2)      // %4
                : "0"(nn),
                  "1"(outptr),
                  "2"(r0),
                  "3"(r1),
                  "4"(r2),
                  "w"(_k012x),  // %10
                  "w"(_k345x),  // %11
                  "w"(_k678x),  // %12
                  "w"(_bias0)   // %13
                : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Column remainder: one output at a time, input advancing by 2.
            for (; remain>0; remain--)
            {
#if __ARM_NEON
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);

                float32x4_t _sum = vmulq_f32(_r00, _k012x);
                _sum = vmlaq_f32(_sum, _r10, _k345x);
                _sum = vmlaq_f32(_sum, _r20, _k678x);

                _sum = vsetq_lane_f32(bias0, _sum, 3);
#if __aarch64__
                *outptr = vaddvq_f32(_sum);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                _ss = vpadd_f32(_ss, _ss);

                *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;
#endif // __ARM_NEON

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
omp_parallel_if.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int test_omp_parallel_if() { int i; int sum; int known_sum; int mysum; int control=1; sum =0; known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 ; #pragma omp parallel private(i) if(control==0) { mysum = 0; for (i = 1; i <= LOOPCOUNT; i++) { mysum = mysum + i; } #pragma omp critical { sum = sum + mysum; } } return (known_sum == sum); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_parallel_if()) { num_failed++; } } return num_failed; }
centroid.h
#ifndef MINOCORE_CLUSTERING_CENTROID_H__
#define MINOCORE_CLUSTERING_CENTROID_H__
#include "minicore/util/blaze_adaptor.h"
#include "minicore/util/csc.h"
#include "minicore/dist.h"
#include "minicore/optim/kmedian.h"

namespace minicore { namespace clustering {

using blaze::unchecked;

// Which centroid-update rule each dissimilarity measure requires.
enum CentroidPol {
    FULL_WEIGHTED_MEAN, // SQRL2, Bregman Divergences (+ convex combinations), cosine distance
    L1_MEDIAN,          // L1
    TVD_MEDIAN,         // Total variation distance, which is L1 in probability space
    GEO_MEDIAN,         // L2 norm
    JSM_MEDIAN,         // Unknown as of yet, but we will try weighted mean for now
    NOT_APPLICABLE
};

// Human-readable name for a CentroidPol (for logging/diagnostics).
static constexpr const char *cp2str(CentroidPol pol) {
    switch(pol) {
        case FULL_WEIGHTED_MEAN: return "full weighted mean";
        case L1_MEDIAN:          return "l1 median";
        case TVD_MEDIAN:         return "tvd median";
        case GEO_MEDIAN:         return "geo median";
        case JSM_MEDIAN:         return "jsm median, same as full for now";
        default:
        case NOT_APPLICABLE:     return "not applicable";
    }
}

// Copy a blaze vector into a center, resizing when the destination supports
// it and throwing when sizes mismatch on a fixed-size destination.
template<typename CtrT, typename VT, bool TF>
void set_center(CtrT &lhs, const blaze::Vector<VT, TF> &rhs) {
    if constexpr(blaze::IsResizable_v<CtrT>) {
        if(lhs.size() != (*rhs).size()) lhs.resize((*rhs).size());
    } else {
        if(lhs.size() != (*rhs).size()) throw std::runtime_error(std::string("lhs size is not correct. function: ") + __PRETTY_FUNCTION__ + "lhs: " + std::to_string(lhs.size()) + ", rhs: " + std::to_string((*rhs).size()));
    }
    if constexpr(blaze::IsSparseVector_v<CtrT>) {
        lhs.reserve(nonZeros(*rhs));
    }
    lhs = *rhs;
}

// Copy a (scaled) compressed sparse vector into a center, element by element.
// Resizes only when sizes mismatch; throws for unresizable destinations.
template<typename CtrT, typename VT, typename IT>
void set_center(CtrT &lhs, const util::ProdCSparseVector<VT, IT> &rhs) {
    lhs.reserve(rhs.nnz());
    if(lhs.size() != rhs.dim_) {
        if constexpr(blaze::IsResizable_v<CtrT>) {
            lhs.resize(rhs.dim_);
        } else {
            throw std::runtime_error("Could not resize unresizable ctr\n");
        }
    }
    lhs.reset();
    if constexpr(blaze::IsSparseVector_v<CtrT>) {
        lhs.reserve(nonZeros(rhs));
    }
    for(const auto &pair: rhs) lhs[pair.index()] = pair.value();
}

// Copy a compressed sparse vector into a center.
// NOTE(review): unlike the ProdCSparseVector overload above, this one
// unconditionally takes the throwing branch when CtrT is not resizable,
// even if sizes already match — confirm whether that asymmetry is intended.
template<typename CtrT, typename VT, typename IT>
void set_center(CtrT &lhs, const util::CSparseVector<VT, IT> &rhs) {
    lhs.reserve(rhs.nnz());
    if constexpr(blaze::IsResizable_v<CtrT>) {
        lhs.resize(rhs.dim_);
    } else {
        throw std::runtime_error("Cannot resize lhs to match rhs\n");
    }
    lhs.reset();
    if constexpr(blaze::IsSparseVector_v<CtrT>) {
        lhs.reserve(nonZeros(rhs));
    }
    for(const auto &pair: rhs) lhs[pair.index()] = pair.value();
}

// Set `ctr` to the (optionally weighted, optionally row-sum-scaled) mean of
// the `nasn` rows of a CSR-like sparse matrix listed in `asn`.
// NOTE(review): the `reduction(+:wsum)` clause is inert — wsum is never
// modified inside the loop (winv is already computed from it); and
// `(*rs)[i]` indexes the row-sums by loop position rather than by row id
// `asn[i]`, unlike the dense-matrix overload below which uses
// elements(*rs, asp, nasn). Confirm against callers which indexing is meant.
template<typename CtrT, typename DataT, typename IndicesT, typename IndPtrT, typename IT, typename WeightT=blz::DV<DataT>, typename RowSumsT=WeightT>
void set_center(CtrT &ctr, const util::CSparseMatrix<DataT, IndicesT, IndPtrT> &mat, IT *asn, size_t nasn, WeightT *w = static_cast<WeightT *>(nullptr), RowSumsT *rs=static_cast<RowSumsT *>(nullptr))
{
    //std::fprintf(stderr, "[%s] Setting center, size before: \n", __PRETTY_FUNCTION__, ctr.size());
    using VT = double;
    blz::DV<VT, blz::TransposeFlag_v<CtrT>> mv(mat.columns(), VT(0));
    double wsum;
    if(w) wsum = sum(blaze::generate(nasn, [asn,w](auto x) {return (*w)[asn[x]];}));
    else  wsum = nasn;
    const double winv = 1. / wsum;
#ifdef _OPENMP
    #pragma omp parallel for schedule(dynamic, 16) reduction(+:wsum)
#endif
    for(size_t i = 0; i < nasn; ++i) {
        auto r = row(mat, asn[i]);
        const double dmul = winv / (rs ? double((*rs)[i]): 1.);
        if(w) {
            VT itemw = (*w)[asn[i]] * dmul;
            SK_UNROLL_4
            // Inner index intentionally shadows the outer loop variable.
            for(size_t i = 0; i < r.n_; ++i) {
                OMP_ATOMIC
                mv[r.indices_[i]] += r.data_[i] * itemw;
            }
        } else {
            for(const auto &pair: r)
                OMP_ATOMIC
                mv[pair.index()] += pair.value() * dmul;
        }
    }
    // NOTE(review): this reset happens after winv was already used; it only
    // affects nothing downstream here — likely vestigial.
    if(!wsum) wsum = nasn;
    ctr = mv;
    //std::fprintf(stderr, "[%s] Set center. size now: %zu\n", __PRETTY_FUNCTION__, ctr.size());
}

// L2 (geometric-median) center update for sparse matrices: delegates to
// util::geomedian over the assigned rows.
template<typename CtrT, typename DataT, typename IndicesT, typename IndPtrT, typename IT, typename WeightT>
void set_center_l2(CtrT &center, const util::CSparseMatrix<DataT, IndicesT, IndPtrT> &mat, IT *asp, size_t nasn, WeightT *weights, double eps=0.)
{
    util::geomedian(mat, center, asp, nasn, weights, eps);
}

// Adapter: view a std::vector's selected elements through a blaze custom vector.
template<typename VT, typename Alloc, typename IT>
decltype(auto) elements(const std::vector<VT, Alloc> &w, IT *asp, size_t nasn) {
    return elements(blz::make_cv(&w[0], w.size()), asp, nasn);
}

// L2 (geometric-median) center update for dense blaze matrices.
// NOTE(review): the trailing VERBOSE_ONLY line references ctrs[i], which is
// not in scope here (should presumably be `center`); it only compiles away
// when verbose logging is disabled — worth fixing upstream.
template<typename CtrT, typename MT, typename IT, typename WeightT>
void set_center_l2(CtrT &center, const blaze::Matrix<MT, blaze::rowMajor> &mat, IT *asp, size_t nasn, WeightT *weights, double eps=0.)
{
    auto rowsel = rows(mat, asp, nasn);
    VERBOSE_ONLY(std::cerr << "Calculating geometric median for " << nasn << " rows and storing in " << center << '\n';)
    if(weights)
        blz::geomedian(rowsel, center, elements(*weights, asp, nasn), eps);
    else
        blz::geomedian(rowsel, center, eps);
    VERBOSE_ONLY(std::cerr << "Calculated geometric median; new values: " << ctrs[i] << '\n';)
}

// Mean-based center update for blaze matrices over the rows selected by
// `asp`/`nasn`, optionally point-weighted (w) and row-sum-normalized (rs).
template<typename CtrT, typename MT, bool SO, typename IT, typename WeightT=blz::DV<blz::ElementType_t<MT>>, typename RowSumsT=WeightT>
void set_center(CtrT &ctr, const blaze::Matrix<MT, SO> &mat, IT *asp, size_t nasn, WeightT *w = static_cast<WeightT*>(nullptr), RowSumsT *rs=static_cast<RowSumsT *>(nullptr)) {
    auto rowsel = rows(*mat, asp, nasn);
    const size_t nc = (*mat).columns();
    //std::cerr << "rowsel: " << rowsel << '\n';
    //std::fprintf(stderr, "setting center, %s weights, %s rowsums\n", w ? "with": "without", rs ? "with": "without");
    if(w) {
        auto elsel = elements(*w, asp, nasn);
        // weighted sum over total weight -> weighted mean
        if(rs) {
            const auto rinv = evaluate(1. / elements(*rs, asp, nasn));
            // Branch only reconciles transpose flags of the two element views.
            if constexpr(blz::TransposeFlag_v<decltype(rinv)> == blz::TransposeFlag_v<decltype(elsel)>)
                ctr = blaze::sum<blaze::columnwise>(rowsel % blaze::expand(elsel * rinv, nc)) / sum(elsel);
            else
                ctr = blaze::sum<blaze::columnwise>(rowsel % blaze::expand(elsel * trans(rinv), nc)) / sum(elsel);
        } else {
            ctr = blaze::sum<blaze::columnwise>(rowsel % blaze::expand(elsel, nc)) / sum(elsel);
        }
    } else {
        if(rs) {
            const auto rinv = evaluate(1. / elements(*rs, asp, nasn));
            if constexpr(blz::TransposeFlag_v<decltype(rinv)> == SO) {
                auto expmat = blaze::expand(trans(rinv), nc);
                ctr = blaze::mean<blaze::columnwise>(rowsel % trans(expmat));
            } else {
                auto expmat = blaze::expand(rinv, nc);
                ctr = blaze::mean<blaze::columnwise>(rowsel % expmat);
            }
        } else {
            //std::fprintf(stderr, "no rowsums provided\n");
            ctr = blaze::mean<blaze::columnwise>(rowsel);
#if 0
            auto seval = evaluate(blaze::sum<blz::columnwise>(rowsel) / double(nasn));
            const double mdiff = sum(abs(ctr - seval)), sevalnorm = blz::l2Norm(seval);
            assert(sum(abs(ctr - seval)) < 1e-4 * sevalnorm || !std::fprintf(stderr, "mdiff: %g. norm: %g\n", mdiff, sevalnorm));
#endif
        }
    }
    //std::fprintf(stderr, "Total distance from center to all relevant rows after: %g\n", blaze::sum(blaze::generate(nasn, [&](auto idx) {return blz::sqrL2Dist(ctr, row(rowsel, idx));})));
    //std::cerr << ctr << '\n';
}

using namespace ::minicore::distance;

// Map a dissimilarity measure to its centroid-update policy.
static constexpr INLINE CentroidPol msr2pol(distance::DissimilarityMeasure msr) {
    switch(msr) {
        case ORACLE_METRIC: case ORACLE_PSEUDOMETRIC:
        default:
            return NOT_APPLICABLE;
        case UWLLR: case LLR: case MKL: case JSD: case SQRL2:
        case REVERSE_MKL:
        case ITAKURA_SAITO: case REVERSE_ITAKURA_SAITO:
        case SYMMETRIC_ITAKURA_SAITO: case RSYMMETRIC_ITAKURA_SAITO:
        case SRULRT: case SRLRT: case JSM:
            return JSM_MEDIAN;
        // These might work, but there's no guarantee it will work well.
        case COSINE_DISTANCE:
        case BHATTACHARYYA_METRIC: case BHATTACHARYYA_DISTANCE:
        case HELLINGER:
            return FULL_WEIGHTED_MEAN;
        case L2: return GEO_MEDIAN;
        case L1: return L1_MEDIAN;
        case TVD: return TVD_MEDIAN;
    }
}

using util::l1_median;
using coresets::l1_median;

// Stateless dispatcher for measure-dependent centroid updates.
struct CentroidPolicy {
    // Write into `ret` the center of the rows of `r` appropriate for
    // `measure`: l1-median for TVD/L1 (with row-sum scaling for L1),
    // row-sum-weighted mean for LLR/UWLLR, and (weighted) means otherwise.
    template<typename VT, bool TF, typename Range, typename VT2=VT, typename RowSums>
    static void perform_average(blaze::DenseVector<VT, TF> &ret, const Range &r, const RowSums &rs,
                                const VT2 *wc = static_cast<VT2 *>(nullptr),
                                dist::DissimilarityMeasure measure=static_cast<dist::DissimilarityMeasure>(-1))
    {
        using FT = blz::ElementType_t<VT>;
        PREC_REQ(measure != static_cast<dist::DissimilarityMeasure>(-1), "Must define dissimilarity measure");
        if(measure == dist::TOTAL_VARIATION_DISTANCE) {
            if(wc) coresets::l1_median(r, ret, wc->data());
            else   coresets::l1_median(r, ret);
        }
        else if(measure == dist::L1) {
            std::conditional_t<blz::IsSparseMatrix_v<Range>,
                               blz::CompressedMatrix<FT, blz::StorageOrder_v<Range> >,
                               blz::DynamicMatrix<FT, blz::StorageOrder_v<Range> >
                > cm = r % blz::expand(trans(rs), r.columns());
            if(wc) coresets::l1_median(cm, ret, wc->data());
            else   coresets::l1_median(cm, ret);
        }
        else if(measure == dist::LLR || measure == dist::UWLLR) {
            PRETTY_SAY << "LLR test\n";
            FT total_sum_inv;
            if(wc) {
                total_sum_inv = 1. / blz::dot(rs, *wc);
                *ret = blaze::sum<blz::columnwise>(r % blz::expand(trans(*wc * rs), r.columns())) * total_sum_inv;
            } else {
                total_sum_inv = 1. / blaze::sum(rs);
                *ret = blaze::sum<blz::columnwise>(r % blz::expand(trans(rs), r.columns())) * total_sum_inv;
            }
        }
        else if(wc) {
            PRETTY_SAY << "Weighted, anything but L1 or LLR (" << dist::detail::prob2str(measure) << ")\n";
            assert((*(*wc)).size() == r.rows());
            assert(blz::expand(*(*wc), r.columns()).rows() == r.rows());
            assert(blz::expand(*(*wc), r.columns()).columns() == r.columns());
            auto wsuminv = 1. / blaze::sum(*wc);
            if(!dist::detail::is_probability(measure)) {
                // e.g., take mean of unscaled values
                auto mat2schur = blz::expand(*(*wc) * rs, r.columns());
                PRETTY_SAY << "NOTPROB r dims: " << r.rows() << "/" << r.columns() << '\n';
                PRETTY_SAY << "NOTPROB mat2schur dims: " << mat2schur.rows() << "/" << mat2schur.columns() << '\n';
                *ret = blaze::sum<blz::columnwise>(r % blz::expand(*(*wc) * rs, r.columns())) * wsuminv;
            } else {
                // Else take mean of scaled values
                auto mat2schur = blz::expand(*(*wc), r.columns());
                PRETTY_SAY << "PROB r dims: " << r.rows() << "/" << r.columns() << '\n';
                PRETTY_SAY << "PROB mat2schur dims: " << mat2schur.rows() << "/" << mat2schur.columns() << '\n';
                *ret = blaze::sum<blz::columnwise>(r % blz::expand(*(*wc), r.columns())) * wsuminv;
                assert(blaze::max(*ret) < 1. || !std::fprintf(stderr, "max in ret: %g for a probability distribution.", blaze::max(*ret)));
            }
        } else {
            PRETTY_SAY << "Unweighted, anything but L1 or LLR (" << dist::detail::prob2str(measure) << ")\n";
            if(dist::detail::is_probability(measure)) {
                // Weighted average for all
#ifndef NDEBUG
                auto expansion = blz::expand(trans(rs), r.columns());
                PRETTY_SAY << "PROB r dims: " << r.rows() << "/" << r.columns() << '\n';
                PRETTY_SAY << "NOTPROB expansion dims: " << expansion.rows() << "/" << expansion.columns() << '\n';
#endif
                *ret = blaze::sum<blz::columnwise>(r % blz::expand(trans(rs), r.columns())) * (1. / (blaze::sum(rs) * r.rows()));
            } else
                *ret = blz::mean<blz::columnwise>(r % blz::expand(trans(rs), r.columns()));
        }
    }

    // Hard-assignment batch update: recompute every center from the points
    // assigned to it, honoring measure, optional weights and optional prior.
    // NOTE(review): locals `r`, `oldnorm` and `newnorm` are computed but never
    // used (norms presumably for a since-removed assert).
    template<typename Matrix, typename RSVec, typename PriorData=RSVec, typename FT=blz::ElementType_t<Matrix>, typename AsnV, typename WPT=blz::DV<FT, blz::rowVector>, bool WSO=blz::rowVector>
    static void perform_average(Matrix &mat, const RSVec &rs, std::vector<blz::DV<FT, blz::rowVector>> &centers,
                                AsnV &assignments, dist::DissimilarityMeasure measure,
                                const blaze::Vector<WPT, WSO> *weight_cv=nullptr, const PriorData *pd=nullptr)
    {
        // Scale weights up if necessary
        std::vector<blaze::SmallArray<uint32_t, 16>> assignv(centers.size());
        for(size_t i = 0; i < assignments.size(); ++i) {
            assert(assignments[i] < centers.size());
            assignv.at(assignments[i]).pushBack(i);
        }
        if(measure == dist::TVD || measure == dist::L1) {
            using ptr_t = decltype((**weight_cv).data());
            ptr_t ptr = nullptr;
            if(weight_cv) ptr = (**weight_cv).data();
            for(unsigned i = 0; i < centers.size(); ++i) {
                coresets::l1_median(mat, centers[i], assignv[i], ptr);
            }
            return;
        }
        [[maybe_unused]] auto pv = pd ? FT(pd->operator[](0)): FT(1.);
        assert(!pd || pd->size() == 1); // TODO: support varied prior over features
        for(unsigned i = 0; i < centers.size(); ++i) {
            auto aip = assignv[i].data();
            auto ain = assignv[i].size();
            auto r(blz::rows(mat, aip, ain));
            auto &c(centers[i]);
            if(weight_cv) {
                c = blaze::sum<blaze::columnwise>(blz::rows(mat, aip, ain) % blaze::expand(blaze::elements(trans(**weight_cv), aip, ain), mat.columns()));
            } else {
                c = blaze::sum<blaze::columnwise>(blz::rows(mat, aip, ain));
            }
            assert(rs.size() == mat.rows());
            if constexpr(blaze::IsSparseMatrix_v<Matrix>) {
                // Apply the (scalar) prior: add it globally, then remove it
                // again from the explicitly-stored coordinates.
                if(pd) {
                    if(weight_cv) {
                        c += pv * sum(blz::elements(rs * **weight_cv, aip, ain));
                    } else {
                        c += pv * ain;
                    }
                    for(const auto ri: assignv[i]) {
                        assert(ri < rs.size());
                        assert(ri < mat.rows());
                        auto rsri = pv;
                        if(!use_scaled_centers(measure)) rsri /= rs[ri];
                        for(const auto &pair: row(mat, ri, unchecked))
                            c[pair.index()] -= rsri;
                    }
                }
            }
            double div;
            if(measure == dist::LLR || measure == dist::UWLLR) {
                if(weight_cv) div = sum(blz::elements(rs * **weight_cv, aip, ain));
                else          div = sum(blz::elements(rs, aip, ain));
            } else {
                if(weight_cv) {
                    div = sum(**weight_cv);
                } else {
                    div = ain;
                }
            }
            auto oldnorm = blaze::l2Norm(c);
            c *= (1. / div);
            auto newnorm = blaze::l2Norm(c);
            assert(min(c) >= 0 || !std::fprintf(stderr, "min center loc: %g\n", min(c)));
        }
    }

    // Incrementally fold one (possibly fractionally assigned) point into a
    // running center `ret`; `cw` is the weight already accumulated.
    template<typename FT, typename Row, typename Src>
    static void __perform_increment(FT neww, FT cw, Row &ret, const Src &dat, FT row_sum, dist::DissimilarityMeasure measure)
    {
        if(measure == dist::L1 || measure == dist::TOTAL_VARIATION_DISTANCE)
            throw std::invalid_argument("__perform_increment is only for linearly-calculated means, not l1 median");
        if(cw == 0.) {
            if(dist::detail::is_probability(measure))
                ret = dat;
            else
                ret = dat * row_sum;
        } else {
            auto div = neww / (neww + cw);
            if(dist::detail::is_probability(measure)) {
                ret += (dat - ret) * div;
            } else if(measure == dist::LLR || measure == dist::UWLLR) {
                ret += (dat * row_sum) * neww;
                // Add up total sum and subtract later
                // since there are three weighting factors here:
                // First, partial assignment
                // Then point-wise weights (both of which are in neww)
                // Then, for LLR/UWLLR, there's weighting by the row-sums
            } else {
                // Maintain running mean for full vector value
                ret += (dat * row_sum - ret) * div;
            }
        }
    }

    // Soft-assignment update: every point contributes to every center in
    // proportion to its assignment weight. `mutptr` (OpenMP builds only)
    // guards per-center running means.
    template<typename VT, bool TF, typename RowSums, typename MatType, typename CenterCon, typename VT2=blz::DynamicVector<blz::ElementType_t<VT>> >
    static void perform_soft_assignment(const blz::DenseMatrix<VT, TF> &assignments,
                                        const RowSums &rs,
                                        OMP_ONLY(std::mutex *mutptr,)
                                        const MatType &data, CenterCon &newcon,
                                        const VT2 *wc = static_cast<const VT2 *>(nullptr),
                                        dist::DissimilarityMeasure measure=static_cast<dist::DissimilarityMeasure>(-1))
    {
        using FT = blz::ElementType_t<VT>;
        PREC_REQ(measure != static_cast<dist::DissimilarityMeasure>(-1), "Must define dissimilarity measure");
        if(measure == dist::L1 || measure == dist::TOTAL_VARIATION_DISTANCE) {
            OMP_PFOR
            for(unsigned j = 0; j < newcon.size(); ++j) {
                blz::DynamicVector<FT, blz::rowVector> newweights;
                {
                    auto col = trans(column(assignments, j));
                    if(wc) newweights = col * *wc;
                    else   newweights = col;
                }
                if(measure == dist::L1) {
                    std::conditional_t<blz::IsDenseMatrix_v<VT>, blz::DynamicMatrix<FT>, blz::CompressedMatrix<FT>>
                        scaled_data = data % blz::expand(rs, data.columns());
                    coresets::l1_median(scaled_data, newcon[j], newweights.data());
                } else { // TVD
                    coresets::l1_median(data, newcon[j], newweights.data());
                }
            }
        } else {
            blz::DynamicVector<FT> summed_contribs(newcon.size(), 0.);
            OMP_PFOR
            for(size_t i = 0; i < data.rows(); ++i) {
                auto item_weight = wc ? wc->operator[](i): static_cast<FT>(1.);
                const auto row_sum = rs[i];
                auto asn(row(assignments, i, unchecked));
                for(size_t j = 0; j < newcon.size(); ++j) {
                    auto &cw = summed_contribs[j];
                    if(auto asnw = asn[j]; asnw > 0.) {
                        auto neww = item_weight * asnw;
                        OMP_ONLY(if(mutptr) mutptr[j].lock();)
                        __perform_increment(neww, cw, newcon[j], row(data, i, unchecked), row_sum, measure);
                        OMP_ONLY(if(mutptr) mutptr[j].unlock();)
                        OMP_ATOMIC
                        cw += neww;
                    }
                }
            }
            if(measure == dist::LLR || measure == dist::UWLLR) {
                OMP_PFOR
                for(auto i = 0u; i < newcon.size(); ++i)
                    newcon[i] *= 1. / blz::dot(column(assignments, i), rs);
            }
        }
    }
}; // CentroidPolicy

// Recompute all centers under L1: re-seed any orphaned (empty) center via
// D2 sampling, reassign affected points, then take the per-center l1 median.
template<typename FT=double, typename Mat, typename AsnT, typename CostsT, typename CtrsT, typename WeightsT, typename IT=uint32_t>
void set_centroids_l1(const Mat &mat, AsnT &asn, CostsT &costs, CtrsT &ctrs, WeightsT *weights)
{
    const unsigned k = ctrs.size();
    using asn_t = std::decay_t<decltype(asn[0])>;
    std::vector<std::vector<asn_t>> assigned(ctrs.size());
    assert(costs.size() == asn.size());
    for(size_t i = 0; i < costs.size(); ++i) {
        assert(asn[i] < assigned.size());
        blz::push_back(assigned[asn[i]], i);
    }
    // `sa` collects the ids of centers with no assigned points.
    blaze::SmallArray<asn_t, 16> sa;
    wy::WyRand<asn_t, 4> rng(costs.size());
    for(unsigned i = 0; i < k; ++i)
        if(assigned[i].empty())
            blz::push_back(sa, i);
    while(!sa.empty()) {
        std::vector<uint32_t> idxleft;
        for(unsigned i = 0; i < k; ++i)
            if(std::find(sa.begin(), sa.end(), i) == sa.end())
                blz::push_back(idxleft, i);
        // Re-calculate for centers that have been removed
        for(auto idx: sa) {
            for(auto assigned_id: assigned[idx]) {
                auto ilit = idxleft.begin();
                auto myr = row(mat, assigned_id);
                auto fcost = l1Dist(ctrs[*ilit++], myr);
                asn_t bestid = 0;
                for(;ilit != idxleft.end();++ilit) {
                    auto ncost = l1Dist(ctrs[*ilit], myr);
                    if(ncost < fcost) bestid = *ilit, fcost = ncost;
                }
                costs[assigned_id] = fcost;
                asn[assigned_id] = bestid;
            }
        }
        // Use D2 sampling to re-seed
        for(const auto idx: sa) {
            std::ptrdiff_t found = reservoir_simd::sample(costs.data(), costs.size(), rng());
            set_center(ctrs[idx], row(mat, found));
            for(size_t i = 0; i < mat.rows(); ++i) {
                const auto c = l1Dist(ctrs[idx], row(mat, i, unchecked));
                if(c < costs[i]) {
                    asn[i] = idx;
                    costs[i] = c;
                }
            }
        }
        for(auto &subasn: assigned) subasn.clear();
        // Check for orphans again
        sa.clear();
        for(size_t i = 0; i < costs.size(); ++i) {
            blz::push_back(assigned[asn[i]], i);
        }
        for(const auto &subasn: assigned)
            if(subasn.empty())
                sa.pushBack(&subasn - assigned.data());
    }
    // All centers now have at least one point: take per-center l1 medians.
    for(unsigned i = 0; i < k; ++i) {
        const auto &asnv = assigned[i];
        const auto asp = asnv.data();
        const auto nasn = asnv.size();
        MINOCORE_VALIDATE(nasn != 0);
        switch(nasn) {
            case 1: set_center(ctrs[i], row(mat, asnv[0])); break;
            default: {
                if constexpr(blaze::IsMatrix_v<Mat>) {
                    auto rowsel = rows(mat, asp, nasn);
                    if(weights) l1_median(rowsel, ctrs[i], elements(*weights, asp, nasn));
                    else        l1_median(rowsel, ctrs[i]);
                } else l1_median(mat, ctrs[i], asp, nasn, weights);
            } break;
        }
    }
}

using util::tvd_median;
using coresets::tvd_median;

// TVD analogue of set_centroids_l1 (continues past this chunk).
template<typename FT=double, typename Mat, typename AsnT, typename CostsT, typename CtrsT, typename WeightsT, typename IT=uint32_t, typename RowSums>
void set_centroids_tvd(const Mat &mat, AsnT &asn, CostsT &costs, CtrsT &ctrs, WeightsT *weights, const RowSums &rsums)
{
    const unsigned k = ctrs.size();
    using asn_t = std::decay_t<decltype(asn[0])>;
    std::vector<std::vector<asn_t>> assigned(ctrs.size());
    assert(costs.size() == asn.size());
    for(size_t i = 0; i < costs.size(); ++i) {
        assert(asn[i] < assigned.size());
        blz::push_back(assigned[asn[i]], i);
    }
    blaze::SmallArray<asn_t, 16> sa;
    wy::WyRand<asn_t, 4> rng(costs.size());
    for(unsigned i = 0; i < k; ++i)
        if(assigned[i].empty())
            blz::push_back(sa, i);
    while(!sa.empty()) {
        std::vector<uint32_t> idxleft;
        for(unsigned i = 0; i < k; ++i)
            if(std::find(sa.begin(), sa.end(), i) == sa.end())
                blz::push_back(idxleft, i);
        // Re-calculate for centers that have been removed
        for(auto idx: sa) {
            for(auto assigned_id: assigned[idx]) {
                auto
ilit = idxleft.begin(); auto myr = row(mat, assigned_id); auto fcost = l1Dist(ctrs[*ilit++], myr); asn_t bestid = 0; for(;ilit != idxleft.end();++ilit) { auto ncost = l1Dist(ctrs[*ilit], myr); if(ncost < fcost) bestid = *ilit, fcost = ncost; } costs[assigned_id] = fcost; asn[assigned_id] = bestid; } } // Use D2 sampling to re-seed for(const auto idx: sa) { std::ptrdiff_t found = reservoir_simd::sample(costs.data(), costs.size(), rng()); assert(found < (std::ptrdiff_t)(costs.size())); set_center(ctrs[idx], row(mat, found)); for(size_t i = 0; i < mat.rows(); ++i) { const auto c = l1Dist(ctrs[idx], row(mat, i, unchecked)); if(c < costs[i]) { asn[i] = idx; costs[i] = c; } } } for(auto &subasn: assigned) subasn.clear(); // Check for orphans again sa.clear(); for(size_t i = 0; i < costs.size(); ++i) { blz::push_back(assigned[asn[i]], i); } for(const auto &subasn: assigned) if(subasn.empty()) sa.pushBack(&subasn - assigned.data()); } for(unsigned i = 0; i < k; ++i) { const auto &asnv = assigned[i]; const auto asp = asnv.data(); const auto nasn = asnv.size(); MINOCORE_VALIDATE(nasn != 0); switch(nasn) { case 1: set_center(ctrs[i], row(mat, asnv[0])); break; default: tvd_median(mat, ctrs[i], asp, nasn, weights, rsums); break; } } } template<typename...Args> void set_centroids_tvd(Args &&...args) { throw std::invalid_argument("TVD clustering not supported explicitly; instead, normalize your count vectors and perform the clustering with L1"); } template<typename FT=double, typename Mat, typename AsnT, typename CostsT, typename CtrsT, typename WeightsT, typename IT=uint32_t> void set_centroids_l2(const Mat &mat, AsnT &asn, CostsT &costs, CtrsT &ctrs, WeightsT *weights, double eps=0.) 
{ using asn_t = std::decay_t<decltype(asn[0])>; std::vector<std::vector<asn_t>> assigned(ctrs.size()); const size_t np = costs.size(); const unsigned k = ctrs.size(); for(size_t i = 0; i < np; ++i) { blz::push_back(assigned[asn[i]], i); } wy::WyRand<asn_t, 4> rng(costs.size()); blaze::SmallArray<asn_t, 16> sa; for(unsigned i = 0; i < k; ++i) if(assigned[i].empty()) blz::push_back(sa, i); while(!sa.empty()) { // Compute partial sum std::vector<uint32_t> idxleft; for(unsigned int i = 0; i < k; ++i) if(std::find(sa.begin(), sa.end(), i) == sa.end()) blz::push_back(idxleft, i); // Re-calculate for centers that have been removed for(auto idx: sa) { for(auto assigned_id: assigned[idx]) { auto ilit = idxleft.begin(); auto myr = row(mat, assigned_id); auto fcost = l2Dist(ctrs[*ilit++], myr); asn_t bestid = 0; for(;ilit != idxleft.end();++ilit) { auto ncost = l2Dist(ctrs[*ilit], myr); if(ncost < fcost) bestid = *ilit, fcost = ncost; } costs[assigned_id] = fcost; asn[assigned_id] = bestid; } } // Use D2 sampling to re-seed for(const auto idx: sa) { std::ptrdiff_t found = reservoir_simd::sample(costs.data(), costs.size(), rng()); set_center(ctrs[idx], row(mat, found)); OMP_PFOR for(size_t i = 0; i < mat.rows(); ++i) { const auto c = l2Dist(ctrs[idx], row(mat, i, unchecked)); if(c < costs[i]) { asn[i] = idx; costs[i] = c; } } } for(auto &subasn: assigned) subasn.clear(); // Check for orphans again sa.clear(); for(size_t i = 0; i < np; ++i) { blz::push_back(assigned[asn[i]], i); } for(const auto &subasn: assigned) if(subasn.empty()) sa.pushBack(&subasn - assigned.data()); } for(unsigned i = 0; i < k; ++i) { const auto nasn = assigned[i].size(); const auto asp = assigned[i].data(); MINOCORE_VALIDATE(nasn != 0); if(nasn == 1) { set_center(ctrs[i], row(mat, *asp)); } else { set_center_l2(ctrs[i], mat, asp, nasn, weights, eps); } } } template<typename FT=double, typename Mat, typename PriorT, typename AsnT, typename CostsT, typename CtrsT, typename WeightsT, typename IT=uint32_t, 
typename SumT> bool set_centroids_full_mean(const Mat &mat, const dist::DissimilarityMeasure measure, const PriorT &prior, AsnT &asn, CostsT &costs, CtrsT &ctrs, WeightsT *weights, SumT &ctrsums, const SumT &rowsums) { const bool isnorm = msr_is_normalized(measure); static_assert(std::is_floating_point_v<std::decay_t<decltype(ctrsums[0])>>, "SumT must be floating-point"); assert(rowsums.size() == (*mat).rows()); assert(ctrsums.size() == ctrs.size()); DBG_ONLY(std::fprintf(stderr, "[%s] Calling set_centroids_full_mean with weights = %p\n", __PRETTY_FUNCTION__, (void *)weights);) // assert(asn.size() == costs.size() || !std::fprintf(stderr, "asn size %zu, cost size %zu\n", asn.size(), costs.size())); blaze::SmallArray<size_t, 16> sa; wy::WyRand<size_t, 4> rng(costs.size()); // Used for restarting orphaned centers const size_t np = costs.size(), k = ctrs.size(); auto assigned = std::make_unique<std::vector<size_t>[]>(k); OMP_ONLY(std::unique_ptr<std::mutex[]> locks(new std::mutex[k]);) for(size_t i = 0; i < np; ++i) { OMP_ONLY(std::lock_guard<std::mutex> lock(locks[asn[i]]);) assigned[asn[i]].push_back(i); } for(unsigned i = 0; i < k; ++i) if(assigned[i].empty()) blz::push_back(sa, i); #ifndef NDEBUG int nfails = 0; for(size_t i = 0; i < k; ++i) { //const auto manual_sum = std::accumulate(ctrs[i].begin(), ctrs[i].end(), 0., [](double x, auto &pair) {return x + pair.value();}); //std::fprintf(stderr, "csum[%zu] (cached) %g, but calcualted: %g (via blaze) vs manual %g\n", i, ctrsums[i], sum(ctrs[i]), manual_sum); if(std::abs(ctrsums[i] - sum(ctrs[i])) > 1e-5) { ++nfails; } } assert(!nfails); #endif bool restarted_any = false; if(const size_t ne = sa.size()) { restarted_any = true; char buf[256]; const auto pv = prior.size() ? 
FT(prior[0]): FT(0); std::sprintf(buf, "Restarting centers with no support for set_centroids_full_mean: %s as measure with prior of size %zu (%g)\n", msr2str(measure), prior.size(), pv); std::cerr << buf; const constexpr RestartMethodPol restartpol = RESTART_D2; const FT psum = prior.size() == 1 ? FT(prior[0]) * prior.size(): sum(prior); OMP_PFOR for(size_t i = 0; i < k; ++i) assigned[i].clear(); std::vector<std::ptrdiff_t> rs; for(size_t i = 0; i < ne; ++i) { // Instead, use a temporary buffer to store partial sums and randomly select newly-started centers // for D2, and just ran std::ptrdiff_t r; if(restartpol == RESTART_GREEDY) r = reservoir_simd::argmax(costs, /*mt=*/true); else if(restartpol == RESTART_RANDOM) r = rng() % costs.size(); else { assert(restartpol == RESTART_D2); r = reservoir_simd::sample(costs.data(), costs.size(), rng()); } rs.push_back(r); const auto id = sa[i]; if(isnorm) { set_center(ctrs[id], row(mat, r) / rowsums[r]); } else set_center(ctrs[id], row(mat, r)); ctrsums[id] = sum(ctrs[id]); } costs = std::numeric_limits<FT>::max(); OMP_PFOR for(size_t i = 0; i < np; ++i) { unsigned bestid = 0; auto r = row(mat, i, unchecked); const auto rsum = rowsums[i]; costs[i] = cmp::msr_with_prior(measure, r, ctrs[0], prior, psum, rsum, ctrsums[0]); //assert(std::abs(sum(r) - rsum) < 1e-6 || !std::fprintf(stderr, "rsum %g and summed %g\n", rsum, sum(r))); for(unsigned j = 0; j < k; ++j) { const auto csum = ctrsums[j]; //DBG_ONLY(auto bsum = sum(ctrs[j]);) //assert(std::abs(csum - bsum) < 1e-10 || !std::fprintf(stderr, "for k = %u, csum %g but found bsum %g\n", j, csum, bsum)); const auto c = cmp::msr_with_prior(measure, r, ctrs[j], prior, psum, rsum, csum); if(c < costs[i]) { costs[i] = c, bestid = j; } } asn[i] = bestid; OMP_CRITICAL { assigned[bestid].push_back(i); } } for(size_t i = 0; i < ne; ++i) { auto pid = rs[i]; const auto cid = sa[i]; if(asn[pid] != cid) { asn[pid] = cid; costs[pid] = 0.; assigned[cid].push_back(pid); } } } OMP_PFOR 
for(unsigned i = 0; i < k; ++i) shared::sort(assigned[i].begin(), assigned[i].end()); for(unsigned i = 0; i < k; ++i) { const auto nasn = assigned[i].size(); const auto asp = assigned[i].data(); auto &ctr = ctrs[i]; if(nasn == 0) continue; else if(nasn == 1) { auto mr = row(mat, *asp); assert(ctr.size() == mr.size()); if(isnorm) { set_center(ctr, mr / rowsums[*asp]); } else { set_center(ctr, mr); } } else { set_center(ctr, mat, asp, nasn, weights, isnorm ? &rowsums: static_cast<const SumT *>(nullptr)); if(isnan(ctr)) { []() __attribute__((noinline,cold)) { throw std::runtime_error("Unexpected nan in ctr"); }(); } } } return restarted_any; } template<typename Vector, typename AT, bool ATF> INLINE void correct_softmax(const Vector &costs, blaze::Vector<AT, ATF> &asn) { using CT = std::common_type_t<blz::ElementType_t<Vector>, blz::ElementType_t<AT>>; using FT = std::conditional_t<std::is_floating_point_v<CT>, CT, std::conditional_t<(sizeof(CT) <= 4), float, double>>; if(isnan(*asn)) { auto bestind = reservoir_simd::argmin(costs.data(), costs.size(), /*mt=*/false); blaze::SmallArray<uint32_t, 8> sa; for(unsigned i = 0; i < costs.size(); ++i) if(costs[i] == costs[bestind]) sa.pushBack(i); FT per = 1. 
/ sa.size(); (*asn).reset(); for(const auto ind: sa) (*asn)[ind] = per; } } template<typename FT=double, typename Mat, typename PriorT, typename CostsT, typename CtrsT, typename WeightsT, typename IT=uint32_t, typename SumT, typename RSumT> double set_centroids_full_mean(const Mat &mat, const dist::DissimilarityMeasure measure, const PriorT &, CostsT &costs, CostsT &asns, CtrsT &ctrs, WeightsT *weights, FT temp, SumT &ctrsums, const RSumT &rowsums) { assert(ctrsums.size() == ctrs.size()); const unsigned k = ctrs.size(); asns = softmax<rowwise>(costs * -temp); double ret = 0.; OMP_PRAGMA("omp parallel for reduction(+:ret)") for(size_t i = 0; i < asns.rows(); ++i) { auto cr = row(costs, i, unchecked); auto r = row(asns, i, unchecked); //std::cerr << "costs: " << cr << '\n' << r << '\n'; correct_softmax(cr, r); ret += dot(cr, r) * (weights ? double((*weights)[i]): 1.); //std::cerr << "post corrected: " << cr << '\n' << r << '\n'; } //std::fprintf(stderr, "Sum of costs: %g\n", ret); //std::fprintf(stderr, "Now setting centers\n"); if(measure == distance::L2 || measure == distance::L1) { //OMP_PFOR for(size_t i = 0; i < k; ++i) { blz::DV<FT, blz::columnVector> colweights; if(!weights) { colweights = column(asns, i, unchecked); } else if constexpr(blz::TransposeFlag_v<WeightsT> == blz::columnVector) { colweights = *weights * column(asns, i, unchecked); } else { colweights = trans(*weights) * column(asns, i, unchecked); } if(measure == distance::L2) { //std::fprintf(stderr, "l2 geomedian\n"); if constexpr(blaze::IsMatrix_v<Mat>) { geomedian(mat, ctrs[i], colweights.data()); } else { geomedian(mat, ctrs[i], (uint64_t *)nullptr, 0, &colweights); } } else { //std::fprintf(stderr, "l1median\n"); l1_median(mat, ctrs[i], (uint64_t *)nullptr, 0, &colweights); } } } else { // full weighted mean (Bregman) const bool isnorm = msr_is_normalized(measure); if(weights) { blz::DV<FT, rowVector> wsums; if constexpr(blz::TransposeFlag_v<WeightsT> == blz::columnVector) { wsums = 1. 
/ sum<columnwise>(asns % expand(trans(*weights), asns.columns())); } else { wsums = 1. / sum<columnwise>(asns % expand(*weights, asns.columns())); } for(size_t i = 0; i < k; ++i) { if constexpr(blaze::TransposeFlag_v<WeightsT> != blaze::TransposeFlag_v<decltype(column(asns, i))>) { if(isnorm) { ctrs[i] = (blz::sum<blz::columnwise>(mat % expand(column(asns, i) * trans(*weights) / rowsums, mat.columns())) * wsums[i]); } else { ctrs[i] = (blz::sum<blz::columnwise>(mat % expand(column(asns, i) * trans(*weights), mat.columns())) * wsums[i]); } } else { ctrs[i] = blz::sum<blz::columnwise>(mat % expand(column(asns, i) * *weights, mat.columns())) * wsums[i]; } } } else { blz::DV<FT> wsums = trans(1. / sum<columnwise>(asns)); for(size_t i = 0; i < k; ++i) { if(isnorm) { auto expmat = expand(column(asns, i) / rowsums, mat.columns()); ctrs[i] = (blz::sum<blz::columnwise>(mat % expmat) * wsums[i]); } else { auto expmat = expand(column(asns, i), mat.columns()); ctrs[i] = (blz::sum<blz::columnwise>(mat % expmat) * wsums[i]); } assert(ctrs[i].size() == mat.columns()); } } } for(size_t i = 0; i < k; ++i) ctrsums[i] = sum(ctrs[i]); DBG_ONLY(std::fprintf(stderr, "Centroids set, soft, with T = %g. Center sums: \n\n", temp);) return ret; } template<typename FT=double, typename VT, typename IT, typename IPtrT, typename PriorT, typename CostsT, typename CtrsT, typename WeightsT, typename SumT> double set_centroids_full_mean(const util::CSparseMatrix<VT, IT, IPtrT> &mat, const dist::DissimilarityMeasure measure, const PriorT &, CostsT &costs, CostsT &asns, CtrsT &ctrs, WeightsT *weights, FT temp, SumT &ctrsums, const SumT &rowsums) { assert(ctrsums.size() == ctrs.size()); DBG_ONLY(std::fprintf(stderr, "Calling set_centroids_full_mean with weights = %p, temp = %g\n", (void *)weights, temp);) assert(min(costs) >= 0. 
|| !std::fprintf(stderr, "mincost: %g\n", min(costs))); asns = softmax<rowwise>(costs * -temp); double ret = 0.; OMP_PRAGMA("omp parallel for reduction(+:ret)") for(size_t i = 0; i < asns.rows(); ++i) { auto r(row(asns, i, unchecked)); auto cr(row(costs, i, unchecked)); correct_softmax(cr, r); const double w = weights ? double((*weights)[i]): 1.; ret += dot(cr, r) * w; } std::vector<blz::DV<FT>> tmprows(ctrs.size(), blz::DV<FT>(mat.columns(), 0.)); if(measure == distance::L2 || measure == distance::L1) { //OMP_PFOR for(size_t i = 0; i < ctrs.size(); ++i) { blz::DV<FT, blz::columnVector> colweights; if(!weights) { colweights = column(asns, i, unchecked); } else if constexpr(blz::TransposeFlag_v<WeightsT> == blz::columnVector) { colweights = *weights * column(asns, i, unchecked); } else { colweights = trans(*weights) * column(asns, i, unchecked); } std::fprintf(stderr, "Weights selected for row %zu/%zu\n", i + 1, ctrs.size()); uint64_t *np = 0; if(measure == distance::L2) geomedian(mat, tmprows[i], np, 0, &colweights); else l1_median(mat, tmprows[i], np, 0, &colweights); std::fprintf(stderr, "Centroid selected for row %zu/%zu\n", i + 1, ctrs.size()); if constexpr(blz::TransposeFlag_v<std::decay_t<decltype(ctrs[0])>> == blaze::rowVector) { ctrs[i] = trans(tmprows[i]); } else { ctrs[i] = tmprows[i]; } } } else { const bool isnorm = msr_is_normalized(measure); OMP_PFOR for(size_t j = 0; j < mat.rows(); ++j) { auto r = row(mat, j, unchecked); auto smr = row(asns, j, unchecked); const double dmul = isnorm ? 1. / rowsums[j]: 1.; for(size_t i = 0; i < r.n_; ++i) { double data = r.data_[i] * dmul; auto idx = r.indices_[i]; size_t m = 0; for(; m < ctrs.size(); ++m) { OMP_ATOMIC tmprows[m][idx] += smr[m] * data; } } } blz::DV<FT, columnVector> winv; if(weights) { if constexpr(blz::TransposeFlag_v<WeightsT> == rowVector) { winv = trans(1. / (*weights * asns)); } else { winv = 1. / trans((trans(*weights) * asns)); } } else { winv = 1. 
/ trans(sum<columnwise>(asns)); } //OMP_PFOR for(size_t i = 0; i < tmprows.size(); ++i) { if constexpr(blz::TransposeFlag_v<std::decay_t<decltype(ctrs[0])>> == blaze::rowVector) { ctrs[i] = trans(tmprows[i] * winv[i]); } else { ctrs[i] = tmprows[i] * winv[i]; } } } for(size_t i = 0; i < ctrs.size(); ++i) ctrsums[i] = sum(ctrs[i]); //for(const auto &ctr: ctrs) std::cerr << ctr << '\n'; DBG_ONLY(std::fprintf(stderr, "Centroids set, soft, with T = %g\n", temp);) return ret; } } } // namespace minicore::clustering #endif /* MINOCORE_CLUSTERING_CENTROID_H__ */
kmp_set_dispatch_buf.c
// RUN: %libomp-compile && %libomp-run 7 // RUN: %libomp-run 0 && %libomp-run -1 // RUN: %libomp-run 1 && %libomp-run 2 && %libomp-run 5 // RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run 7 // RUN: %libomp-run 1 && %libomp-run 2 && %libomp-run 5 // UNSUPPORTED: clang-11 #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 7 #define MY_MAX 200 #define MY_MIN -200 #ifndef MY_SCHEDULE # define MY_SCHEDULE dynamic #endif int num_disp_buffers, num_loops; int a, b, a_known_value, b_known_value; int test_kmp_set_disp_num_buffers() { int success = 1; a = 0; b = 0; // run many small dynamic loops to stress the dispatch buffer system #pragma omp parallel { int i,j; for (j = 0; j < num_loops; j++) { #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } } // detect failure if (a != a_known_value || b != b_known_value) { success = 0; printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); } return success; } int main(int argc, char** argv) { int i,j; int num_failed=0; if (argc != 2) { fprintf(stderr, "usage: %s num_disp_buffers\n", argv[0]); exit(1); } // set the number of dispatch buffers num_disp_buffers = atoi(argv[1]); kmp_set_disp_num_buffers(num_disp_buffers); // figure out the known values to compare with calculated result a_known_value = 0; b_known_value = 0; // if specified to use bad num_disp_buffers set num_loops // to something reasonable if (num_disp_buffers <= 0) num_loops = 10; else num_loops = num_disp_buffers*10; for (j = 0; j < num_loops; j++) { for (i = MY_MIN; i < MY_MAX; i+=INCR) a_known_value++; for (i = MY_MAX; i >= MY_MIN; i-=INCR) b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_kmp_set_disp_num_buffers()) { num_failed++; } } return num_failed; }
dynamic_smagorinsky_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // // System includes #include <vector> #include <map> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "utilities/openmp_utils.h" #include "utilities/parallel_utilities.h" #include "utilities/geometry_utilities.h" #include "includes/cfd_variables.h" #include "fluid_dynamics_application_variables.h" #include "includes/global_pointer_variables.h" #ifndef KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED #define KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Classes ///@{ /// Helper class to dynamically determine a value for the Smagorinsly parameter. /** This class uses the Variational Germano Identity to determine a value for the Smagorinsky parameter. This value is stored in the elemental variable C_SMAGORINSKY, the element implementation is responsible for using it. The ability to assign different values to different patches of elements (identified by the PATCH_INDEX variable) is supported, although it tends to produce unreliable results due to a 0/0 indetermination in patches with smooth velocity fields. This class is based in Oberai, A.A. and Wanderer, J., Variational formulation of the Germano identity for the Navier Stokes equations, Journal of Turbulence, 2005, vol 6. Note that the formulation described there requires a nested mesh. It takes the model part containing a coarse mesh as input and assumes that all elements will be subdivided before CalculateC() is called. Remember to call StoreCoarseMesh before refining the element, otherwise the coarse mesh will be lost. @see VMS for an element implementation that uses the Smagorinsky model. 
@see Local_Refine_Triangle_Mesh,Local_Refine_Tetrahedra_Mesh for the element refinement process. */ class DynamicSmagorinskyUtils { public: ///@name Life Cycle ///@{ /// Constructor /** @param rModelPart Reference to the model part containing the coarse mesh @param DomainSize Spatial dimension (2 or 3) */ DynamicSmagorinskyUtils(ModelPart& rModelPart, unsigned int DomainSize): mrModelPart(rModelPart), mDomainSize(DomainSize), mCoarseMesh(), mPatchIndices() {} /// Destructor ~DynamicSmagorinskyUtils() {} ///@} ///@name Operations ///@{ /// Store current mesh as coarse mesh. Call before refining. /** If you are refining more than once, this only has to be called before last refinement. */ void StoreCoarseMesh() { // Clear existing mesh (if any) mCoarseMesh.clear(); // Store current mesh for( ModelPart::ElementsContainerType::ptr_iterator itpElem = mrModelPart.Elements().ptr_begin(); itpElem != mrModelPart.Elements().ptr_end(); ++itpElem) { // (*itpElem)->GetValue(C_SMAGORINSKY) = 0.0; // Set the Smagorinsky parameter to zero for the coarse mesh (do this once to reset any input values) mCoarseMesh.push_back(*itpElem); } // Count the number of patches in the model (in parallel) const int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mCoarseMesh.size(),NumThreads,ElementPartition); std::vector< std::vector<int> > LocalIndices(NumThreads); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementsContainerType::iterator ElemBegin = mCoarseMesh.begin() + ElementPartition[k]; ModelPart::ElementsContainerType::iterator ElemEnd = mCoarseMesh.begin() + ElementPartition[k+1]; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { this->AddNewIndex(LocalIndices[k],itElem->GetValue(PATCH_INDEX)); } } // Combine the partial lists and create a map for PATCH_INDEX -> Vector position unsigned int Counter = 0; std::pair<int, unsigned int> NewVal; std::pair< 
std::map<int, unsigned int>::iterator, bool > Result; for( std::vector< std::vector<int> >::iterator itList = LocalIndices.begin(); itList != LocalIndices.end(); ++itList ) { for( std::vector<int>::iterator itIndex = itList->begin(); itIndex != itList->end(); ++itIndex) { // Note that instering in map already sorts and checks for uniqueness NewVal.first = *itIndex; NewVal.second = Counter; Result = mPatchIndices.insert(NewVal); if (Result.second) ++Counter; } } } /// Provide a value for the Smagorinsky coefficient using the Variational Germano Identity void CalculateC() { // Update the velocity values for the terms that belong to the coarse mesh this->SetCoarseVel(); // Partitioning const int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector CoarseElementPartition,FineElementPartition; OpenMPUtils::DivideInPartitions(mCoarseMesh.size(),NumThreads,CoarseElementPartition); OpenMPUtils::DivideInPartitions(mrModelPart.Elements().size(),NumThreads,FineElementPartition); // Initialize temporary containers unsigned int PatchNumber = mPatchIndices.size(); std::vector< std::vector<double> > GlobalPatchNum(NumThreads); // Numerator on each patch std::vector< std::vector<double> > GlobalPatchDen(NumThreads); // Denominator on each patch const double EnergyTol = 0.005; double TotalDissipation = 0; #pragma omp parallel reduction(+:TotalDissipation) { int k = OpenMPUtils::ThisThread(); // Initialize the iterator boundaries for this thread ModelPart::ElementsContainerType::iterator CoarseElemBegin = mCoarseMesh.begin() + CoarseElementPartition[k]; ModelPart::ElementsContainerType::iterator CoarseElemEnd = mCoarseMesh.begin() + CoarseElementPartition[k+1]; ModelPart::ElementsContainerType::iterator FineElemBegin = mrModelPart.ElementsBegin() + FineElementPartition[k]; ModelPart::ElementsContainerType::iterator FineElemEnd = mrModelPart.ElementsBegin() + FineElementPartition[k+1]; // Initialize some thread-local variables Vector LocalValues, 
LocalCoarseVel; Matrix LocalMassMatrix; ProcessInfo& rProcessInfo = mrModelPart.GetProcessInfo(); double Residual,Model; unsigned int PatchPosition; // Thread-local containers for the values in each patch std::vector<double>& rPatchNum = GlobalPatchNum[k]; std::vector<double>& rPatchDen = GlobalPatchDen[k]; rPatchNum.resize(PatchNumber,0.0);// Fill with zeros rPatchDen.resize(PatchNumber,0.0); if (mDomainSize == 2) { LocalValues.resize(9); LocalCoarseVel.resize(9); LocalMassMatrix.resize(9,9,false); array_1d<double,3> N; BoundedMatrix<double,3,2> DN_DX; BoundedMatrix<double,2,2> dv_dx; // Evaluate the N-S and model terms in each coarse element for( ModelPart::ElementsContainerType::iterator itElem = CoarseElemBegin; itElem != CoarseElemEnd; ++itElem) { PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms2D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] += Residual; rPatchDen[PatchPosition] += Model; TotalDissipation += Residual; } // Now evaluate the corresponding terms in the fine mesh for( ModelPart::ElementsContainerType::iterator itElem = FineElemBegin; itElem != FineElemEnd; ++itElem) { // Deactivate Smagorinsky to compute the residual of galerkin+stabilization terms only itElem->GetValue(C_SMAGORINSKY) = 0.0; PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms2D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] -= Residual; rPatchDen[PatchPosition] -= Model; } } else // mDomainSize == 3 { LocalValues.resize(16); LocalCoarseVel.resize(16); LocalMassMatrix.resize(16,16,false); array_1d<double,4> N; BoundedMatrix<double,4,3> DN_DX; BoundedMatrix<double,3,3> dv_dx; // Evaluate the N-S and model terms in each coarse element for( ModelPart::ElementsContainerType::iterator itElem = CoarseElemBegin; itElem != CoarseElemEnd; ++itElem) { PatchPosition = mPatchIndices[ 
itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms3D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] += Residual; rPatchDen[PatchPosition] += Model; TotalDissipation += Residual; } // Now evaluate the corresponding terms in the fine mesh for( ModelPart::ElementsContainerType::iterator itElem = FineElemBegin; itElem != FineElemEnd; ++itElem) { // Deactivate Smagorinsky to compute the residual of galerkin+stabilization terms only itElem->GetValue(C_SMAGORINSKY) = 0.0; PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms3D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] -= Residual; rPatchDen[PatchPosition] -= Model; } } } // Combine the results of each thread in position 0 for( std::vector< std::vector<double> >::iterator itNum = GlobalPatchNum.begin()+1, itDen = GlobalPatchDen.begin()+1; itNum != GlobalPatchNum.end(); ++itNum, ++itDen) { for( std::vector<double>::iterator TotalNum = GlobalPatchNum[0].begin(), LocalNum = itNum->begin(), TotalDen = GlobalPatchDen[0].begin(), LocalDen = itDen->begin(); TotalNum != GlobalPatchNum[0].end(); ++TotalNum,++LocalNum,++TotalDen,++LocalDen) { *TotalNum += *LocalNum; *TotalDen += *LocalDen; } } // Compute the smagorinsky coefficient for each patch by combining the values from each thread std::vector<double> PatchC(PatchNumber); double NumTol = EnergyTol * fabs(TotalDissipation); for( std::vector<double>::iterator itNum = GlobalPatchNum[0].begin(), itDen = GlobalPatchDen[0].begin(), itC = PatchC.begin(); itC != PatchC.end(); ++itNum, ++itDen, ++itC) { // If the dissipation we are "missing" by not considering Smagorinsky is small, do not use Smagorinsky (this avoids a division by ~0, as the denominator should go to zero too) if ( (fabs(*itNum) < NumTol) )//|| (fabs(*itDen) < 1.0e-12) ) *itC = 0.0; else *itC = sqrt( 0.5 * fabs( *itNum / *itDen ) ); } // 
Finally, assign each element its new smagorinsky value #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementsContainerType::iterator ElemBegin = mrModelPart.ElementsBegin() + FineElementPartition[k]; ModelPart::ElementsContainerType::iterator ElemEnd = mrModelPart.ElementsBegin() + FineElementPartition[k+1]; unsigned int PatchPosition; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; itElem->GetValue(C_SMAGORINSKY) = PatchC[PatchPosition]; } } } /// For the bridge analysis problem, correct the boundary flag after the refinement. /** Remember to run this AFTER EACH REFINEMENT STEP Possible values for the variable: 1.0 inlet, 2.0 bridge surface, 3.0 outlet, 0.0 otherwise @param rThisVariable The Kratos variable used to identify the boundary */ void CorrectFlagValues(Variable<double>& rThisVariable = FLAG_VARIABLE) { // Loop over coarse mesh to evaluate all terms that do not involve the fine mesh const int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfNodes(),NumThreads,NodePartition); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = mrModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = mrModelPart.NodesBegin() + NodePartition[k+1]; double Value0, Value1; for( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if( itNode->GetValue(FATHER_NODES).size() == 2 ) // If the node is refined { Value0 = itNode->GetValue(FATHER_NODES)[0].FastGetSolutionStepValue(rThisVariable); Value1 = itNode->GetValue(FATHER_NODES)[1].FastGetSolutionStepValue(rThisVariable); if( Value0 != Value1 ) // If this node is problematic { if ( Value0 == 0.0 || Value1 == 0.0 ) { // if either of the parents is not on the boundary, this node is not on the boundary 
itNode->FastGetSolutionStepValue(rThisVariable) = 0.0; } /* All remaining cases are unlikely in well-posed problems, I'm arbitrarily giving priority to the outlet, so that the node is only inlet or bridge surface if both parents are */ else if( Value0 == 3.0 ) { itNode->FastGetSolutionStepValue(rThisVariable) = Value0; } else if( Value1 == 3.0 ) { // The node is only bridge surface if both parents are itNode->FastGetSolutionStepValue(rThisVariable) = Value1; } else // Default behaviour: Parent 0 takes precedence { itNode->FastGetSolutionStepValue(rThisVariable) = Value0; } } } } } } ///@} private: ///@name Member Variables ///@{ /// ModelPart of the fluid problem ModelPart& mrModelPart; /// Spatial dimenstion unsigned int mDomainSize; /// Container for the coarse mesh (the fine mesh is stored by the model part) ModelPart::ElementsContainerType mCoarseMesh; /// A map relating patch indices to positions in the internal storage arrays std::map<int, unsigned int> mPatchIndices; ///@name Private Operations ///@{ /// Calculate the "Coarse Mesh" velocity /** The operations on the coarse mesh are evaluated on the fine mesh, but using an averaged velocity on the nodes that only exist on the fine mesh. Velocity gradients calculated on the fine mesh using this average velocity will be equal to those that would be obtained using the coarse mesh. This function assigns the "coarse" velocity value to all nodes */ void SetCoarseVel() { /* Note: This loop can't be parallelized, as we are relying on the fact that refined nodes are at the end of the list and their parents will be updated before the refined nodes are reached. 
There is an alternative solution (always calculate the coarse mesh velocity from the historic database) which can be parallelized but won't work for multiple levels of refinement */ for( ModelPart::NodeIterator itNode = mrModelPart.NodesBegin(); itNode != mrModelPart.NodesEnd(); ++itNode) { if( itNode->GetValue(FATHER_NODES).size() == 2 ) { Node<3>& rParent1 = itNode->GetValue(FATHER_NODES)[0]; Node<3>& rParent2 = itNode->GetValue(FATHER_NODES)[1]; itNode->GetValue(COARSE_VELOCITY) = 0.5 * ( rParent1.FastGetSolutionStepValue(VELOCITY) + rParent2.FastGetSolutionStepValue(VELOCITY) ); } else { itNode->GetValue(COARSE_VELOCITY) = itNode->FastGetSolutionStepValue(VELOCITY); } } } /// Return the Galerkin (+stabilization) and Model terms for this element (2D version) void GermanoTerms2D(Element& rElem, array_1d<double,3>& rShapeFunc, BoundedMatrix<double,3,2>& rShapeDeriv, BoundedMatrix<double,2,2>& rGradient, Vector& rNodalResidualContainer, Vector& rNodalVelocityContainer, Matrix& rMassMatrix, ProcessInfo& rProcessInfo, double& rResidual, double& rModel) { const double Dim = 2; const double NumNodes = 3; // Initialize double Area; double Density = 0.0; rGradient = ZeroMatrix(Dim,Dim); rResidual = 0.0; rModel = 0.0; // Calculate the residual this->CalculateResidual(rElem,rMassMatrix,rNodalVelocityContainer,rNodalResidualContainer,rProcessInfo); // We use rNodalVelocityContainer as an auxiliaty variable this->GetCoarseVelocity2D(rElem,rNodalVelocityContainer); for( Vector::iterator itRHS = rNodalResidualContainer.begin(), itVel = rNodalVelocityContainer.begin(); itRHS != rNodalResidualContainer.end(); ++itRHS, ++itVel) rResidual += (*itVel) * (*itRHS); // Calculate the model term GeometryUtils::CalculateGeometryData( rElem.GetGeometry(), rShapeDeriv, rShapeFunc, Area); // Compute Grad(u), Density and < Grad(w), Grad(u) > for (unsigned int j = 0; j < NumNodes; ++j) // Columns of <Grad(Ni),Grad(Nj)> { Density += rShapeFunc[j] * 
rElem.GetGeometry()[j].FastGetSolutionStepValue(DENSITY); const array_1d< double,3 >& rNodeVel = rElem.GetGeometry()[j].FastGetSolutionStepValue(VELOCITY); // Nodal velocity for (unsigned int i = 0; i < NumNodes; ++i) // Rows of <Grad(Ni),Grad(Nj)> { const array_1d< double,3 >& rNodeTest = rElem.GetGeometry()[i].GetValue(COARSE_VELOCITY); // Test function (particularized to coarse velocity) for (unsigned int k = 0; k < Dim; ++k) // Space Dimensions rModel += rNodeTest[k] * rShapeDeriv(i,k) * rShapeDeriv(j,k) * rNodeVel[k]; } for (unsigned int m = 0; m < Dim; ++m) // Calculate symmetric gradient { for (unsigned int n = 0; n < m; ++n) // Off-diagonal rGradient(m,n) += 0.5 * (rShapeDeriv(j,n) * rNodeVel[m] + rShapeDeriv(j,m) * rNodeVel[n]); // Symmetric gradient, only lower half is written rGradient(m,m) += rShapeDeriv(j,m) * rNodeVel[m]; // Diagonal } } rModel *= Area; // To this point, rModel contains the integral over the element of Grad(U_coarse):Grad(U) // Norm[ Grad(u) ] double SqNorm = 0.0; for (unsigned int i = 0; i < Dim; ++i) { for (unsigned int j = 0; j < i; ++j) SqNorm += 2.0 * rGradient(i,j) * rGradient(i,j); // Adding off-diagonal terms (twice, as matrix is symmetric) SqNorm += rGradient(i,i) * rGradient(i,i); // Diagonal terms } // "Fixed" part of Smagorinsky viscosity: Density * FilterWidth^2 * Norm(SymmetricGrad(U)). 
2*C^2 is accounted for in the caller function const double sqH = 2*Area; rModel *= Density * sqH * sqrt(SqNorm); } /// Return the Galerkin (+stabilization) and Model terms for this element (3D version) void GermanoTerms3D(Element& rElem, array_1d<double,4>& rShapeFunc, BoundedMatrix<double,4,3>& rShapeDeriv, BoundedMatrix<double,3,3>& rGradient, Vector& rNodalResidualContainer, Vector& rNodalVelocityContainer, Matrix& rMassMatrix, ProcessInfo& rProcessInfo, double& rResidual, double& rModel) { const double Dim = 3; const double NumNodes = 4; // Initialize double Volume; double Density = 0.0; rGradient = ZeroMatrix(Dim,Dim); rResidual = 0.0; rModel = 0.0; // Calculate the residual this->CalculateResidual(rElem,rMassMatrix,rNodalVelocityContainer,rNodalResidualContainer,rProcessInfo); // We use rNodalVelocityContainer as an auxiliaty variable this->GetCoarseVelocity3D(rElem,rNodalVelocityContainer); for( Vector::iterator itRHS = rNodalResidualContainer.begin(), itVel = rNodalVelocityContainer.begin(); itRHS != rNodalResidualContainer.end(); ++itRHS, ++itVel) rResidual += (*itVel) * (*itRHS); // Calculate the model term GeometryUtils::CalculateGeometryData( rElem.GetGeometry(), rShapeDeriv, rShapeFunc, Volume); // Compute Grad(u), Density and < Grad(w), Grad(u) > for (unsigned int j = 0; j < NumNodes; ++j) // Columns of <Grad(Ni),Grad(Nj)> { Density += rShapeFunc[j] * rElem.GetGeometry()[j].FastGetSolutionStepValue(DENSITY); const array_1d< double,3 >& rNodeVel = rElem.GetGeometry()[j].FastGetSolutionStepValue(VELOCITY); // Nodal velocity for (unsigned int i = 0; i < NumNodes; ++i) // Rows of <Grad(Ni),Grad(Nj)> { const array_1d< double,3 >& rNodeTest = rElem.GetGeometry()[i].GetValue(COARSE_VELOCITY); // Test function (particularized to coarse velocity) for (unsigned int k = 0; k < Dim; ++k) // Space Dimensions rModel += rNodeTest[k] * rShapeDeriv(i,k) * rShapeDeriv(j,k) * rNodeVel[k]; } for (unsigned int m = 0; m < Dim; ++m) // Calculate symmetric gradient { for 
(unsigned int n = 0; n < m; ++n) // Off-diagonal rGradient(m,n) += 0.5 * (rShapeDeriv(j,n) * rNodeVel[m] + rShapeDeriv(j,m) * rNodeVel[n]); // Symmetric gradient, only lower half is written rGradient(m,m) += rShapeDeriv(j,m) * rNodeVel[m]; // Diagonal } } rModel *= Volume; // To this point, rModel contains the integral over the element of Grad(U_coarse):Grad(U) // Norm[ Symmetric Grad(u) ] = ( 2 * Sij * Sij )^(1/2), we compute the Sij * Sij part in the following loop: double SqNorm = 0.0; for (unsigned int i = 0; i < Dim; ++i) { for (unsigned int j = 0; j < i; ++j) SqNorm += 2.0 * rGradient(i,j) * rGradient(i,j); // Adding off-diagonal terms (twice, as matrix is symmetric) SqNorm += rGradient(i,i) * rGradient(i,i); // Diagonal terms } const double cubeH = 6*Volume; rModel *= Density * pow(cubeH, 2.0/3.0) * sqrt(2.0 * SqNorm); } /// Equivalent to VMS2DSmagorinsky::GetFirstDerivativesVector(), using the velocity evaluated on the coarse mesh void GetCoarseVelocity2D(Element& rElement, Vector& rVar) { unsigned int LocalIndex = 0; const Element::GeometryType& rGeom = rElement.GetGeometry(); for (unsigned int itNode = 0; itNode < 3; ++itNode) { const array_1d< double,3>& rCoarseVel = rGeom[itNode].GetValue(COARSE_VELOCITY); rVar[LocalIndex++] = rCoarseVel[0]; rVar[LocalIndex++] = rCoarseVel[1]; rVar[LocalIndex++] = 0.0; // Pressure Dof } } /// Equivalent to VMS3DSmagorinsky::GetFirstDerivativesVector(), using the velocity evaluated on the coarse mesh void GetCoarseVelocity3D(Element& rElement, Vector& rVar) { unsigned int LocalIndex = 0; const Element::GeometryType& rGeom = rElement.GetGeometry(); for (unsigned int itNode = 0; itNode < 4; ++itNode) { const array_1d< double,3>& rCoarseVel = rGeom[itNode].GetValue(COARSE_VELOCITY); rVar[LocalIndex++] = rCoarseVel[0]; rVar[LocalIndex++] = rCoarseVel[1]; rVar[LocalIndex++] = rCoarseVel[2]; rVar[LocalIndex++] = 0.0; // Pressure Dof } } /// Call the element's member functions to obtain its residual void 
CalculateResidual(Element& rElement, Matrix& rMassMatrix, ///@todo This matrix and the next vector should be transformed to static members once we find a threadsafe way to do so Vector& rAuxVector, Vector& rResidual, const ProcessInfo& rCurrentProcessInfo) { const auto& r_const_elem_ref = rElement; rElement.InitializeNonLinearIteration(rCurrentProcessInfo); // Dynamic stabilization terms rElement.CalculateRightHandSide(rResidual,rCurrentProcessInfo); // Dynamic Terms rElement.CalculateMassMatrix(rMassMatrix,rCurrentProcessInfo); r_const_elem_ref.GetSecondDerivativesVector(rAuxVector,0); noalias(rResidual) -= prod(rMassMatrix,rAuxVector); // Velocity Terms rElement.CalculateLocalVelocityContribution(rMassMatrix,rResidual,rCurrentProcessInfo); // Note that once we are here, we no longer need the mass matrix } /// Check if a patch index is known void AddNewIndex( std::vector<int>& rIndices, int ThisIndex ) { bool IsNew = true; for( std::vector<int>::iterator itIndex = rIndices.begin(); itIndex != rIndices.end(); ++itIndex) { if( ThisIndex == *itIndex) { IsNew = false; break; } } if (IsNew) rIndices.push_back(ThisIndex); } ///@} // Private operations }; ///@} Kratos classes ///@} Application group } // namespace Kratos #endif /* KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED */
partial.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

/*
 * 1-D diffusion solver: third-order Runge-Kutta in time, second-order
 * centered differences in space, OpenMP-parallel loops.
 *
 * Usage: prog nx dt num_threads
 *   nx          - number of grid points (>= 3)
 *   dt          - time step (> 0)
 *   num_threads - OpenMP thread count (>= 1)
 */

void initcond(int nx, double y0[nx], double x[nx]);
void RuKu3(double t0, double dt, int nx, double y0[nx], double x[nx], int nghost);
void get_rhs(double t, int nx, double f[nx], double x[nx], double rhs[nx], int nghost);
void der2(int nx, double fx[nx], double x[nx], double der2f[nx], int nghost);

int main(int argc, const char *argv[])
{
    /* Fix: the original dereferenced argv[1..3] without checking argc,
     * which is undefined behavior when arguments are missing. */
    if (argc < 4) {
        fprintf(stderr, "usage: %s nx dt num_threads\n", argv[0]);
        return EXIT_FAILURE;
    }

    const int nsteps = 1000000000; /* hard cap on iterations */
    const double tmax = 1000.;     /* simulated end time */
    const int nghost = 1;          /* ghost cells on each side */

    int nx = atoi(argv[1]);
    double dt = atof(argv[2]);
    int num_threads = atoi(argv[3]);
    if (nx < 3 || dt <= 0.0 || num_threads < 1) {
        fprintf(stderr, "invalid arguments: need nx >= 3, dt > 0, num_threads >= 1\n");
        return EXIT_FAILURE;
    }
    omp_set_num_threads(num_threads);

    /* NOTE(review): VLAs on the stack, as in the original; very large nx
     * may overflow the stack — consider malloc for production use. */
    double y0[nx]; /* solution */
    double x[nx];  /* grid coordinates */
    double t = 0.0;
    int i = 0;
    /* Initialized so the final printf is well-defined even if the time
     * loop body never executes. */
    double ymax = 0.0, ymean = 0.0, ymin = 0.0;

    initcond(nx, y0, x);

    /*-------------------------------------------------------------------
     * Time-stepping loop
     *------------------------------------------------------------------*/
    double start = omp_get_wtime();
    while (i < nsteps && t < tmax) {
        RuKu3(t, dt, nx, y0, x, nghost);
        t = t + dt;
        if (i % 1000 == 0) { /* periodic min/max/mean diagnostics */
            ymin = ymax = ymean = y0[0];
            for (int j = 1; j < nx; j++) {
                if (y0[j] > ymax) {
                    ymax = y0[j];
                }
                if (y0[j] < ymin) {
                    ymin = y0[j];
                }
                ymean += y0[j];
            }
        }
        i += 1;
    }
    printf("%f\n", omp_get_wtime() - start);
    printf(" Timestep, time %i %f \n", i, t);
    printf(" ymin ymax ymean %f %f %f \n", ymin, ymax, ymean / nx);
    return 0;
}

/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/

/* Right-hand side of the PDE: diffusion with constant coefficient 0.001.
 * (An advection RHS using a first derivative existed here historically
 * and was disabled; it has been removed.)  't' is unused because this
 * RHS is autonomous, but it is kept for the time-stepper interface. */
void get_rhs(double t, int nx, double f[nx], double x[nx], double rhs[nx], int nghost)
{
    (void)t;
    double der2f[nx]; /* second derivative of f */

    der2(nx, f, x, der2f, nghost);

#pragma omp parallel for default(none) shared(rhs, der2f, nx) schedule(static)
    for (int i = 0; i < nx; i++) {
        rhs[i] = 0.001 * der2f[i];
    }
}

/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/

/* Third-order Runge-Kutta step (stage weights 8/15, 5/12, 3/4).
 * Advances y0 in place by one step of size dt. */
void RuKu3(double t, double dt, int nx, double y0[nx], double x[nx], int nghost)
{
    double k1[nx], k2[nx], k3[nx];
    double y1[nx], y2[nx];
    double rhs[nx];
    double t1, t2;

    /* Stage 1 */
    get_rhs(t, nx, y0, x, rhs, nghost);
#pragma omp parallel for default(none) shared(nx, dt, rhs, y0, y1, k1) schedule(static)
    for (int i = 0; i < nx; i++) {
        k1[i] = rhs[i] * dt;
        y1[i] = y0[i] + k1[i] * 8 / 15;
    }
    t1 = t + dt * 8 / 15;

    /* Stage 2 */
    get_rhs(t1, nx, y1, x, rhs, nghost);
#pragma omp parallel for default(none) shared(nx, dt, rhs, y0, y2, k1, k2) schedule(static)
    for (int i = 0; i < nx; i++) {
        k2[i] = rhs[i] * dt;
        y2[i] = y0[i] + k1[i] / 4 + k2[i] * 5 / 12;
    }
    t2 = t + dt * 2 / 3;

    /* Stage 3: write the updated solution back into y0 */
    get_rhs(t2, nx, y2, x, rhs, nghost);
#pragma omp parallel for default(none) shared(nx, dt, rhs, y0, k1, k3) schedule(static)
    for (int i = 0; i < nx; i++) {
        k3[i] = rhs[i] * dt;
        y0[i] = y0[i] + k1[i] / 4 + k3[i] * 3 / 4;
    }
}

/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/

/* Initialize with a Gaussian blob of amplitude 1 centered at x0 = 0 on a
 * uniform grid over [-1, 1]. */
void initcond(int nx, double y0[nx], double x[nx])
{
    double width = 0.2;
    double ampl = 1.;
    double x0 = 0.;

#pragma omp parallel for default(none) shared(nx, x, x0, y0, ampl, width) schedule(static)
    for (int i = 0; i < nx; i++) {
        x[i] = (double)2. * i / (nx - 1.) - 1.;
        y0[i] = ampl * exp(-pow(x[i] - x0, 2.) / width);
    }
}

/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/

/* Second derivative by 3-point centered differences with periodic
 * boundaries.  Only nghost == 1 is implemented.  Assumes a uniform grid
 * (dx is taken from the first interval). */
void der2(int nx, double fx[nx], double x[nx], double der2f[nx], int nghost)
{
    double dx = x[1] - x[0];

    switch (nghost) {
    case 1:
#pragma omp parallel for default(none) shared(nghost, nx, der2f, fx) schedule(static)
        for (int i = nghost; i < nx - nghost; i++) {
            der2f[i] = fx[i - nghost] - 2. * fx[i] + fx[i + nghost];
        }
        /* Periodic wrap-around at both ends */
        der2f[0] = fx[nx - 1] - 2. * fx[0] + fx[1];
        der2f[nx - 1] = fx[nx - 2] - 2. * fx[nx - 1] + fx[0];
#pragma omp parallel for default(none) shared(der2f, nx, dx) schedule(static)
        for (int i = 0; i < nx; i++) {
            der2f[i] = der2f[i] / dx / dx;
        }
        break;
    default:
        /* Fix: typo ("nhgost"), missing newline, and wrong stream in the
         * original diagnostic. */
        fprintf(stderr, "nghost must be 1\n");
        break;
    }
}
LAGraph_1_to_n.c
//------------------------------------------------------------------------------
// LAGraph_1_to_n.c
//------------------------------------------------------------------------------

// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.

//****************************************************************************

// FIXME: this is not yet included in the test coverage suite

// Create either a GrB_INT64 or GrB_INT32 "ramp" vector 1:n

#include <LAGraph.h>
#include <LAGraphX.h>

// Cleanup macro used by the error paths below.  It references the locals
// v, I, and X by name, so it may only be expanded at points where all
// three are in scope (X is declared inside each branch of the size test).
// NOTE(review): LAGRAPH_OK presumably expands this macro on failure —
// confirm against the LAGraph error-handling conventions.
#define LAGraph_FREE_ALL        \
{                               \
    GrB_free (&v) ;             \
    LAGraph_Free ((void **)&I) ; \
    LAGraph_Free ((void **)&X) ; \
}

//****************************************************************************
/// @todo If this method gets promoted it should return GrB_Type for scalar
/// that is stored in the output vector.
///
GrB_Info LAGraph_1_to_n     // create an integer vector v = 1:n
(
    GrB_Vector *v_handle,   // vector to create
    GrB_Index n             // size of vector to create
)
{
    GrB_Info info ;
    GrB_Vector v = NULL ;

    // Cap the thread count so each thread gets at least ~4096 entries,
    // but always use at least one thread.
    int nthreads;
    LAGraph_GetNumThreads (&nthreads, NULL) ;
    nthreads = LAGraph_MIN (n / 4096, nthreads) ;
    nthreads = LAGraph_MAX (nthreads, 1) ;

    // allocate workspace: indices 0..n-1 for the build
    GrB_Index *I = LAGraph_Malloc (n, sizeof (GrB_Index)) ;

    // create a 32-bit or 64-bit integer vector 1:n
    // (64-bit storage only when values 1..n would overflow int32)
    if (n > INT32_MAX)
    {
        int64_t *X = LAGraph_Malloc (n, sizeof (int64_t)) ;
        if (I == NULL || X == NULL)
        {
            LAGraph_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        // fill I = 0:n-1 and X = 1:n in parallel
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t k = 0 ; k < n ; k++)
        {
            I [k] = k ;
            X [k] = k+1 ;
        }
        // indices are unique, so the GrB_PLUS_INT64 dup operator is never
        // actually applied; it is required by the build API regardless
        LAGRAPH_OK (GrB_Vector_new (&v, GrB_INT64, n)) ;
        LAGRAPH_OK (GrB_Vector_build (v, I, X, n, GrB_PLUS_INT64)) ;
        LAGraph_Free ((void **)&X) ; X = NULL;
    }
    else
    {
        int32_t *X = LAGraph_Malloc (n, sizeof (int32_t)) ;
        if (I == NULL || X == NULL)
        {
            LAGraph_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t k = 0 ; k < n ; k++)
        {
            I [k] = k ;
            X [k] = k+1 ;
        }
        LAGRAPH_OK (GrB_Vector_new (&v, GrB_INT32, n)) ;
        LAGRAPH_OK (GrB_Vector_build (v, I, X, n, GrB_PLUS_INT32)) ;
        LAGraph_Free ((void **)&X) ; X = NULL;
    }

    LAGraph_Free ((void **)&I) ; I = NULL;

    // return result; ownership of v transfers to the caller
    (*v_handle) = v ; v = NULL;
    return (GrB_SUCCESS) ;
}
unpack.c
/*
   Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 * Author: Sandeep Sharma
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define MIN(I,J) ((I) < (J) ? (I) : (J))

/* Read 'count' doubles from 'file', skipping 'offset' header bytes.
 * Returns a malloc'd buffer the caller must free, or NULL on any error
 * (open/seek/short read/out of memory).  Fix: the original never checked
 * fopen, fread, or malloc results. */
static double *read_doubles(const char *file, long offset, size_t count)
{
    FILE *f = fopen(file, "rb");
    if (f == NULL) {
        fprintf(stderr, "unpack: cannot open %s\n", file);
        return NULL;
    }
    if (offset > 0 && fseek(f, offset, SEEK_SET) != 0) {
        fprintf(stderr, "unpack: cannot seek in %s\n", file);
        fclose(f);
        return NULL;
    }
    double *buf = malloc(count * sizeof *buf);
    if (buf == NULL) {
        fprintf(stderr, "unpack: out of memory (%zu doubles)\n", count);
        fclose(f);
        return NULL;
    }
    if (fread(buf, sizeof *buf, count, f) != count) {
        fprintf(stderr, "unpack: short read from %s\n", file);
        free(buf);
        fclose(f);
        return NULL;
    }
    fclose(f);
    return buf;
}

/* Write 'count' doubles to 'file'.  Returns 0 on success, -1 on error. */
static int write_doubles(const char *file, const double *buf, size_t count)
{
    FILE *f = fopen(file, "wb");
    if (f == NULL) {
        fprintf(stderr, "unpack: cannot open %s for writing\n", file);
        return -1;
    }
    size_t written = fwrite(buf, sizeof *buf, count, f);
    int rc = fclose(f); /* fclose flushes; its result matters for writes */
    if (written != count || rc != 0) {
        fprintf(stderr, "unpack: short write to %s\n", file);
        return -1;
    }
    return 0;
}

/* Sort {a,b,c} descending into *A >= *B >= *C (duplicates allowed). */
static void sort3_desc(size_t a, size_t b, size_t c,
                       size_t *A, size_t *B, size_t *C)
{
    *A = MAX(a, MAX(b, c));
    *C = MIN(a, MIN(b, c));
    *B = a + b + c - *A - *C; /* the remaining (middle) value */
}

/* Packed position of (a,b,c) under the 6-fold E3 symmetry E_abc = E_acb = ...:
 * tetrahedral + triangular + linear term with A >= B >= C, i.e.
 * A(A+1)(A+2)/3! + B(B+1)/2! + C/1!. */
static size_t packed_index3(size_t a, size_t b, size_t c)
{
    size_t A, B, C;
    sort3_desc(a, b, c, &A, &B, &C);
    return (A*A*A + 3*A*A + 2*A)/6 + (B*B + B)/2 + C;
}

/* Packed position of (a,b,c,d) under the 8-fold E4 symmetry:
 * pentatopic + tetrahedral + triangular + linear term with A >= B >= C >= D,
 * i.e. A(A+1)(A+2)(A+3)/4! + B(B+1)(B+2)/3! + C(C+1)/2! + D/1!.
 * Replaces the original 4-way MAX/MIN cascade with a descending sort of the
 * same multiset, which yields the identical (A,B,C,D). */
static size_t packed_index4(size_t a, size_t b, size_t c, size_t d)
{
    size_t v[4] = { a, b, c, d };
    for (int i = 1; i < 4; i++) { /* insertion sort, descending */
        size_t key = v[i];
        int j = i - 1;
        while (j >= 0 && v[j] < key) {
            v[j+1] = v[j];
            j--;
        }
        v[j+1] = key;
    }
    return (v[0]*v[0]*v[0]*v[0] + 6*v[0]*v[0]*v[0] + 11*v[0]*v[0] + 6*v[0])/24
         + (v[1]*v[1]*v[1] + 3*v[1]*v[1] + 2*v[1])/6
         + (v[2]*v[2] + v[2])/2
         + v[3];
}

/* Expand a 6-fold-symmetry-packed E3 tensor (norb orbitals) from 'file'
 * into the full norb^6 array, written to 'fout'. */
void unpackE3(char* file, char* fout, int norb)
{
    size_t norb2 = (size_t)norb * norb;
    /* number of unique elements under the 6-fold symmetry */
    size_t e3slicesize = (norb2*norb2*norb2 + 3*norb2*norb2 + 2*norb2)/6;
    double *fj = read_doubles(file, 0, e3slicesize);
    if (fj == NULL) return;

    double *e3 = malloc(norb2*norb2*norb2 * sizeof *e3);
    if (e3 == NULL) {
        fprintf(stderr, "unpack: out of memory for e3\n");
        free(fj);
        return;
    }

#pragma omp parallel default(none) shared(norb, norb2, e3, fj)
    {
        int i, j, k, l, m, n; /* declared inside the region => private */
        /* Fix: the original used "omp parallel for" here, nesting a second
         * parallel region inside "omp parallel" so every thread executed
         * the FULL loop nest redundantly.  "omp for" shares the iterations
         * across the existing team. */
#pragma omp for
        for (i=0; i<norb; i++)
        for (j=0; j<norb; j++)
        for (k=0; k<norb; k++)
        for (l=0; l<norb; l++)
        for (m=0; m<norb; m++)
        for (n=0; n<norb; n++) {
            size_t a = (size_t)i*norb + l;
            size_t b = (size_t)j*norb + m;
            size_t c = (size_t)k*norb + n;
            /* fully square (unpacked) position */
            size_t q = (size_t)i + (size_t)j*norb + k*norb2
                     + l*norb*norb2 + m*norb2*norb2 + n*norb2*norb2*norb;
            e3[q] = fj[packed_index3(a, b, c)];
        }
    }

    write_doubles(fout, e3, norb2*norb2*norb2);
    free(e3);
    free(fj);
}

/* Expand an 8-fold-symmetry-packed E4 tensor from 'file' into the full
 * norb^8 array, written to 'fout'.  Kept serial, as in the original. */
void unpackE4(char* file, char* fout, int norb)
{
    size_t norb2 = (size_t)norb * norb;
    /* number of unique elements under the 8-fold symmetry */
    size_t e4slicesize = (norb2*norb2*norb2*norb2 + 6*norb2*norb2*norb2
                        + 11*norb2*norb2 + 6*norb2)/24;
    double *fj = read_doubles(file, 0, e4slicesize);
    if (fj == NULL) return;

    double *e4 = malloc(norb2*norb2*norb2*norb2 * sizeof *e4);
    if (e4 == NULL) {
        fprintf(stderr, "unpack: out of memory for e4\n");
        free(fj);
        return;
    }

    for (int i=0; i<norb; i++)
    for (int j=0; j<norb; j++)
    for (int k=0; k<norb; k++)
    for (int h=0; h<norb; h++)
    for (int l=0; l<norb; l++)
    for (int m=0; m<norb; m++)
    for (int n=0; n<norb; n++)
    for (int o=0; o<norb; o++) {
        size_t a = (size_t)i*norb + l;
        size_t b = (size_t)j*norb + m;
        size_t c = (size_t)k*norb + n;
        size_t d = (size_t)h*norb + o;
        /* fully square (unpacked) position */
        size_t q = (size_t)i + (size_t)j*norb + k*norb2 + h*norb*norb2
                 + l*norb2*norb2 + m*norb*norb2*norb2
                 + n*norb2*norb2*norb2 + o*norb*norb2*norb2*norb2;
        e4[q] = fj[packed_index4(a, b, c, d)];
    }

    write_doubles(fout, e4, norb2*norb2*norb2*norb2);
    free(e4);
    free(fj);
}

/* Reorder an unpacked (no symmetry) E3 tensor produced by BLOCK from
 * E^ijk_nml to E^ijk_lmn order.  BLOCK dumps carry a 93-byte header that
 * is skipped before the data. */
void unpackE3_BLOCK(char* file, char* fout, int norb)
{
    size_t norb2 = (size_t)norb * norb;
    size_t e3slicesize = norb2*norb2*norb2; /* no symmetry */
    double *fj = read_doubles(file, 93, e3slicesize);
    if (fj == NULL) return;

    double *e3 = malloc(norb2*norb2*norb2 * sizeof *e3);
    if (e3 == NULL) {
        fprintf(stderr, "unpack: out of memory for e3\n");
        free(fj);
        return;
    }

#pragma omp parallel default(none) shared(norb, norb2, e3, fj)
    {
        int i, j, k, l, m, n;
        /* Fix: "omp parallel for" -> "omp for" (see unpackE3). */
#pragma omp for
        for (i=0; i<norb; i++)
        for (j=0; j<norb; j++)
        for (k=0; k<norb; k++)
        for (l=0; l<norb; l++)
        for (m=0; m<norb; m++)
        for (n=0; n<norb; n++) {
            /* input is E^ijk_nml; output must be E^ijk_lmn (swap l <-> n) */
            size_t p = (size_t)i + (size_t)j*norb + k*norb2
                     + n*norb*norb2 + m*norb2*norb2 + l*norb2*norb2*norb;
            size_t q = (size_t)i + (size_t)j*norb + k*norb2
                     + l*norb*norb2 + m*norb2*norb2 + n*norb2*norb2*norb;
            e3[q] = fj[p];
        }
    }

    write_doubles(fout, e3, norb2*norb2*norb2);
    free(e3);
    free(fj);
}

/* Reorder an unpacked (no symmetry) E4 tensor produced by BLOCK from
 * E^ijkh_onml to E^ijkh_lmno order.  The BLOCK dump header here is
 * 109 bytes.  Kept serial, as in the original. */
void unpackE4_BLOCK(char* file, char* fout, int norb)
{
    size_t norb2 = (size_t)norb * norb;
    size_t e4slicesize = norb2*norb2*norb2*norb2; /* no symmetry */
    double *fj = read_doubles(file, 109, e4slicesize);
    if (fj == NULL) return;

    double *e4 = malloc(norb2*norb2*norb2*norb2 * sizeof *e4);
    if (e4 == NULL) {
        fprintf(stderr, "unpack: out of memory for e4\n");
        free(fj);
        return;
    }

    for (int i=0; i<norb; i++)
    for (int j=0; j<norb; j++)
    for (int k=0; k<norb; k++)
    for (int h=0; h<norb; h++)
    for (int l=0; l<norb; l++)
    for (int m=0; m<norb; m++)
    for (int n=0; n<norb; n++)
    for (int o=0; o<norb; o++) {
        /* input is E^ijkh_onml; output must be E^ijkh_lmno */
        size_t p = (size_t)i + (size_t)j*norb + k*norb2 + h*norb*norb2
                 + o*norb2*norb2 + n*norb*norb2*norb2
                 + m*norb2*norb2*norb2 + l*norb*norb2*norb2*norb2;
        size_t q = (size_t)i + (size_t)j*norb + k*norb2 + h*norb*norb2
                 + l*norb2*norb2 + m*norb*norb2*norb2
                 + n*norb2*norb2*norb2 + o*norb*norb2*norb2*norb2;
        e4[q] = fj[p];
    }

    write_doubles(fout, e4, norb2*norb2*norb2*norb2);
    free(e4);
    free(fj);
}
convolution_1x1_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_pack1to4_int8_neon(bottom_im2col, top_blob, kernel, opt); } static void conv1x1s2_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = w - 2 * outw + w; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const signed char* r0 = bottom_blob.channel(p); signed char* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { outptr[0] = r0[0]; outptr[1] = r0[2]; outptr[2] = r0[4]; outptr[3] = r0[6]; r0 += 8; outptr += 4; } for (; j + 1 < outw; j += 2) { outptr[0] = r0[0]; outptr[1] = r0[2]; r0 += 4; outptr += 2; } for (; j < outw; j++) { 
outptr[0] = r0[0]; r0 += 2; outptr += 1; } r0 += tailstep; } } conv1x1s1_sgemm_pack1to4_int8_neon(bottom_blob_shrinked, top_blob, kernel, opt); }
prop2DAcoVTIDenQ_DEO2_FDTD.h
#ifndef PROP2DACOVTIDENQ_DEO2_FDTD_H #define PROP2DACOVTIDENQ_DEO2_FDTD_H #include <omp.h> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <fftw3.h> #include <complex> #include "propagatorStaticFunctions.h" #define MIN(x,y) ((x)<(y)?(x):(y)) class Prop2DAcoVTIDenQ_DEO2_FDTD { public: const bool _freeSurface; const long _nbx, _nbz, _nthread, _nx, _nz, _nsponge; const float _dx, _dz, _dt; const float _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz; const float _fDefault = 0.85f; float * __restrict__ _v = NULL; float * __restrict__ _eps = NULL; float * __restrict__ _eta = NULL; float * __restrict__ _b = NULL; float * __restrict__ _f = NULL; float * __restrict__ _dtOmegaInvQ = NULL; float * __restrict__ _pSpace = NULL; float * __restrict__ _mSpace = NULL; float * __restrict__ _tmpPx1 = NULL; float * __restrict__ _tmpPz1 = NULL; float * __restrict__ _tmpMx1 = NULL; float * __restrict__ _tmpMz1 = NULL; float * __restrict__ _tmpPx2 = NULL; float * __restrict__ _tmpPz2 = NULL; float * __restrict__ _tmpMx2 = NULL; float * __restrict__ _tmpMz2 = NULL; float * _pOld = NULL; float * _pCur = NULL; float * _mOld = NULL; float * _mCur = NULL; Prop2DAcoVTIDenQ_DEO2_FDTD( bool freeSurface, long nthread, long nx, long nz, long nsponge, float dx, float dz, float dt, const long nbx, const long nbz) : _freeSurface(freeSurface), _nthread(nthread), _nx(nx), _nz(nz), _nsponge(nsponge), _nbx(nbx), _nbz(nbz), _dx(dx), _dz(dz), _dt(dt), _c8_1(+1225.0 / 1024.0), _c8_2(-245.0 / 3072.0), _c8_3(+49.0 / 5120.0), _c8_4(-5.0 / 7168.0), _invDx(1.0 / _dx), _invDz(1.0 / _dz) { // Allocate arrays _v = new float[_nx * _nz]; _eps = new float[_nx * _nz]; _eta = new float[_nx * _nz]; _b = new float[_nx * _nz]; _f = new float[_nx * _nz]; _dtOmegaInvQ = new float[_nx * _nz]; _pSpace = new float[_nx * _nz]; _mSpace = new float[_nx * _nz]; _tmpPx1 = new float[_nx * _nz]; _tmpPz1 = new float[_nx * _nz]; _tmpMx1 = new float[_nx * _nz]; _tmpMz1 = new float[_nx * _nz]; _tmpPx2 = 
new float[_nx * _nz]; _tmpPz2 = new float[_nx * _nz]; _tmpMx2 = new float[_nx * _nz]; _tmpMz2 = new float[_nx * _nz]; _pOld = new float[_nx * _nz]; _pCur = new float[_nx * _nz]; _mOld = new float[_nx * _nz]; _mCur = new float[_nx * _nz]; numaFirstTouch(_nx, _nz, _nthread, _v, _eps, _eta, _b, _f, _dtOmegaInvQ, _pSpace, _mSpace, _tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _tmpPx2, _tmpPz2, _tmpMx2, _tmpMz2, _pOld, _pCur, _mOld, _mCur, _nbx, _nbz); } #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void numaFirstTouch( const long nx, const long nz, const long nthread, float * __restrict__ v, float * __restrict__ eps, float * __restrict__ eta, float * __restrict__ b, float * __restrict__ f, float * __restrict__ dtOmegaInvQ, float * __restrict__ pSpace, float * __restrict__ mSpace, float * __restrict__ tmpPx1, float * __restrict__ tmpPz1, float * __restrict__ tmpMx1, float * __restrict__ tmpMz1, float * __restrict__ tmpPx2, float * __restrict__ tmpPz2, float * __restrict__ tmpMx2, float * __restrict__ tmpMz2, float * __restrict__ pOld, float * __restrict__ pCur, float * __restrict__ mOld, float * __restrict__ mCur, const long BX_2D, const long BZ_2D) { const long nx4 = nx - 4; const long nz4 = nz - 4; #pragma omp parallel for collapse(2) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_2D) { for (long bz = 4; bz < nz4; bz += BZ_2D) { const long kxmax = MIN(bx + BX_2D, nx4); const long kzmax = MIN(bz + BZ_2D, nz4); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * nz + kz; v[k] = 0; eps[k] = 0; eta[k] = 0; b[k] = 0; f[k] = 0; dtOmegaInvQ[k] = 0; pSpace[k] = 0; mSpace[k] = 0; tmpPx1[k] = 0; tmpPz1[k] = 0; tmpMx1[k] = 0; tmpMz1[k] = 0; tmpPx2[k] = 0; tmpPz2[k] = 0; tmpMx2[k] = 0; tmpMz2[k] = 0; pOld[k] = 0; pCur[k] = 0; mOld[k] = 0; mCur[k] = 0; } } } } // zero annulus #pragma omp parallel for num_threads(nthread) 
schedule(static) for (long kz = 0; kz < 4; kz++) { #pragma omp simd for (long kx = 0; kx < nx; kx++) { const long k = kx * _nz + kz; v[k] = eps[k] = eta[k] = b[k] = f[k] = dtOmegaInvQ[k] = pSpace[k] = mSpace[k] = tmpPx1[k] = tmpPz1[k] = tmpMx1[k] = tmpMz1[k] = tmpPx2[k] = tmpPz2[k] = tmpMx2[k] = tmpMz2[k] = pOld[k] = pCur[k] = mOld[k] = mCur[k] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kz = nz4; kz < nz; kz++) { #pragma omp simd for (long kx = 0; kx < nx; kx++) { const long k = kx * _nz + kz; v[k] = eps[k] = eta[k] = b[k] = f[k] = dtOmegaInvQ[k] = pSpace[k] = mSpace[k] = tmpPx1[k] = tmpPz1[k] = tmpMx1[k] = tmpMz1[k] = tmpPx2[k] = tmpPz2[k] = tmpMx2[k] = tmpMz2[k] = pOld[k] = pCur[k] = mOld[k] = mCur[k] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < 4; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long k = kx * _nz + kz; v[k] = eps[k] = eta[k] = b[k] = f[k] = dtOmegaInvQ[k] = pSpace[k] = mSpace[k] = tmpPx1[k] = tmpPz1[k] = tmpMx1[k] = tmpMz1[k] = tmpPx2[k] = tmpPz2[k] = tmpMx2[k] = tmpMz2[k] = pOld[k] = pCur[k] = mOld[k] = mCur[k] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = nx4; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long k = kx * _nz + kz; v[k] = eps[k] = eta[k] = b[k] = f[k] = dtOmegaInvQ[k] = pSpace[k] = mSpace[k] = tmpPx1[k] = tmpPz1[k] = tmpMx1[k] = tmpMz1[k] = tmpPx2[k] = tmpPz2[k] = tmpMx2[k] = tmpMz2[k] = pOld[k] = pCur[k] = mOld[k] = mCur[k] = 0; } } } ~Prop2DAcoVTIDenQ_DEO2_FDTD() { if (_v != NULL) delete [] _v; if (_eps != NULL) delete [] _eps; if (_eta != NULL) delete [] _eta; if (_b != NULL) delete [] _b; if (_f != NULL) delete [] _f; if (_dtOmegaInvQ != NULL) delete [] _dtOmegaInvQ; if (_pSpace != NULL) delete [] _pSpace; if (_mSpace != NULL) delete [] _mSpace; if (_tmpPx1 != NULL) delete [] _tmpPx1; if (_tmpPz1 != NULL) delete [] _tmpPz1; if (_tmpMx1 != NULL) delete 
[] _tmpMx1; if (_tmpMz1 != NULL) delete [] _tmpMz1; if (_tmpPx2 != NULL) delete [] _tmpPx2; if (_tmpPz2 != NULL) delete [] _tmpPz2; if (_tmpMx2 != NULL) delete [] _tmpMx2; if (_tmpMz2 != NULL) delete [] _tmpMz2; if (_pOld != NULL) delete [] _pOld; if (_pCur != NULL) delete [] _pCur; if (_mOld != NULL) delete [] _mOld; if (_mCur != NULL) delete [] _mCur; } #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif void info() { printf("\n"); printf("Prop2DAcoVTIDenQ_DEO2_FDTD\n"); printf(" nx,nz; %5ld %5ld\n", _nx, _nz); printf(" nthread,nsponge,fs; %5ld %5ld %5d\n", _nthread, _nsponge, _freeSurface); printf(" X min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dx * (_nx - 1), _dx); printf(" Z min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dz * (_nz - 1), _dz); } /** * Notes * - User must have called setupDtOmegaInvQ_2D to initialize the array _dtOmegaInvQ * - wavefield arrays are switched in this call * pCur -> pOld * pOld -> pCur * mCur -> mOld * mOld -> mCur */ #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void timeStep() { applyFirstDerivatives2D_PlusHalf_Sandwich( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _pCur, _pCur, _mCur, _mCur, _eps, _eta, _f, _b, _tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _nbx, _nbz); applyFirstDerivatives2D_MinusHalf_TimeUpdate_Nonlinear( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _dt, _tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _v, _b, _dtOmegaInvQ, _pCur, _mCur, _pSpace, _mSpace, _pOld, _mOld, _nbx, _nbz); // swap pointers float *pswap = _pOld; _pOld = _pCur; _pCur = pswap; float *mswap = _mOld; _mOld = _mCur; _mCur = mswap; } /** * Same as above, but does not collect the spatial derivatives * Note this is only used in the PSD operators, where the first (transient) time steps do * not need to save the P'' term */ #if defined(__FUNCTION_CLONES__) 
__attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void timeStepLinear() { applyFirstDerivatives2D_PlusHalf_Sandwich( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _pCur, _pCur, _mCur, _mCur, _eps, _eta, _f, _b, _tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _nbx, _nbz); applyFirstDerivatives2D_MinusHalf_TimeUpdate_Linear( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _dt, _tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _v, _b, _dtOmegaInvQ, _pCur, _mCur, _pOld, _mOld, _nbx, _nbz); // swap pointers float *pswap = _pOld; _pOld = _pCur; _pCur = pswap; float *mswap = _mOld; _mOld = _mCur; _mCur = mswap; } /** * Scale spatial derivatives by v^2/b to make them temporal derivs */ #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void scaleSpatialDerivatives() { #pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, _nx); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _nz + kz; const float v2OverB = _v[k] * _v[k] / _b[k]; _pSpace[k] *= v2OverB; _mSpace[k] *= v2OverB; } } } } } /** * Add the Born source at the current time * * User must have: * - called the nonlinear forward * - saved 2nd time derivative of pressure at corresponding time index in array dp2 * - Born source term will be injected into the _pCur array */ #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void forwardBornInjection_V(float *dVel, float *wavefieldDP, float *wavefieldDM) { #pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, 
_nx); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _nz + kz; const float V = _v[k]; const float B = _b[k]; const float dV = dVel[k]; // V^2/b factor to "clear" the b/V^2 factor on L_tP and L_tM // _dt^2 factor is from the finite difference approximation // 2B_dV/V^3 factor is from the linearization const float factor = 2 * _dt * _dt * dV / V; _pCur[k] += factor * wavefieldDP[k]; _mCur[k] += factor * wavefieldDM[k]; } } } } } #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void forwardBornInjection_VEA(float *dVel, float *dEps, float *dEta, float *wavefieldP, float *wavefieldM, float *wavefieldDP, float *wavefieldDM) { // Right side spatial derivatives for the Born source applyFirstDerivatives2D_PlusHalf( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, wavefieldP, wavefieldP, _tmpPx1, _tmpPz1, _nbx, _nbz); applyFirstDerivatives2D_PlusHalf( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, wavefieldM, wavefieldM, _tmpMx1, _tmpMz1, _nbx, _nbz); // Sandwich terms for the Born source // note flipped sign for Z derivative term between P and M #pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, _nx); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _nz + kz; const float V = _v[k]; const float E = _eps[k]; const float A = _eta[k]; const float B = _b[k]; const float F = _f[k]; const float dE = dEps[k]; const float dA = dEta[k]; _tmpPx2[k] = (+2 * B * dE) *_tmpPx1[k]; _tmpPz2[k] = (-2 * B * F * A * dA) *_tmpPz1[k] + (dA * B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpMz1[k]; _tmpMx2[k] = 0; _tmpMz2[k] 
= (+2 * B * F * A * dA) *_tmpMz1[k] + (dA * B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpPz1[k]; } } } } // Left side spatial derivatives for the Born source applyFirstDerivatives2D_MinusHalf( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _tmpPx2, _tmpPz2, _tmpPx1, _tmpPz1, _nbx, _nbz); applyFirstDerivatives2D_MinusHalf( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _tmpMx2, _tmpMz2, _tmpMx1, _tmpMz1, _nbx, _nbz); // add the born source at the current time #pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, _nx); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _nz + kz; const float V = _v[k]; const float B = _b[k]; const float dV = dVel[k]; const float dt2v2OverB = _dt * _dt * V * V / B; const float factor = 2 * B * dV / (V * V * V); _pCur[k] += dt2v2OverB * (factor * wavefieldDP[k] + _tmpPx1[k] + _tmpPz1[k]); _mCur[k] += dt2v2OverB * (factor * wavefieldDM[k] + _tmpMx1[k] + _tmpMz1[k]); } } } } } /** * Accumulate the Born image term at the current time * * User must have: * - called the nonlinear forward * - saved 2nd time derivative of pressure at corresponding time index in array dp2 * - Born image term will be accumulated iu the _dm array */ #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void adjointBornAccumulation_V(float *dVel, float *wavefieldDP, float *wavefieldDM) { #pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, _nx); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < 
kzmax; kz++) { const long k = kx * _nz + kz; const float V = _v[k]; const float B = _b[k]; const float factor = 2 * B / (V * V * V); dVel[k] += factor * (wavefieldDP[k] * _pOld[k] + wavefieldDM[k] * _mOld[k]); } } } } } /** * Apply Kz wavenumber filter for up/down wavefield seperation * Faqi, 2011, Geophysics https://library.seg.org/doi/full/10.1190/1.3533914 * * We handle the FWI and RTM imaging conditions with a condition inside the OMP loop * * Example Kz filtering with 8 samples * frequency | +0 | +1 | +2 | +3 | N | -3 | -2 | -1 | * original | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | * upgoing | 0 | X | X | X | 4 | 5 | 6 | 7 | * dngoing | 0 | 1 | 2 | 3 | 4 | X | X | X | */ #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void adjointBornAccumulation_wavefieldsep_V(float *dVel, float *wavefieldDP, float *wavefieldDM, const long isFWI) { const long nfft = 2 * _nz; const float scale = 1.0f / (float)(nfft); // FWI: adj wavefield is dngoing // RTM: adj wavefield is upgoing const long kfft_adj = (isFWI) ? 
0 : nfft / 2; std::complex<float> * __restrict__ tmp = new std::complex<float>[nfft]; fftwf_plan planForward = fftwf_plan_dft_1d(nfft, reinterpret_cast<fftwf_complex*>(tmp), reinterpret_cast<fftwf_complex*>(tmp), +1, FFTW_ESTIMATE); fftwf_plan planInverse = fftwf_plan_dft_1d(nfft, reinterpret_cast<fftwf_complex*>(tmp), reinterpret_cast<fftwf_complex*>(tmp), -1, FFTW_ESTIMATE); delete [] tmp; #pragma omp parallel num_threads(_nthread) { std::complex<float> * __restrict__ tmp_nlf_p = new std::complex<float>[nfft]; std::complex<float> * __restrict__ tmp_adj_p = new std::complex<float>[nfft]; std::complex<float> * __restrict__ tmp_nlf_m = new std::complex<float>[nfft]; std::complex<float> * __restrict__ tmp_adj_m = new std::complex<float>[nfft]; #pragma omp for schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { const long kxmax = MIN(bx + _nbx, _nx); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kfft = 0; kfft < nfft; kfft++) { tmp_nlf_p[kfft] = 0; tmp_adj_p[kfft] = 0; tmp_nlf_m[kfft] = 0; tmp_adj_m[kfft] = 0; } #pragma omp simd for (long kz = 0; kz < _nz; kz++) { const long k = kx * _nz + kz; tmp_nlf_p[kz] = scale * wavefieldDP[k]; tmp_adj_p[kz] = scale * _pOld[k]; tmp_nlf_m[kz] = scale * wavefieldDM[k]; tmp_adj_m[kz] = scale * _mOld[k]; } fftwf_execute_dft(planForward, reinterpret_cast<fftwf_complex*>(tmp_nlf_p), reinterpret_cast<fftwf_complex*>(tmp_nlf_p)); fftwf_execute_dft(planForward, reinterpret_cast<fftwf_complex*>(tmp_adj_p), reinterpret_cast<fftwf_complex*>(tmp_adj_p)); fftwf_execute_dft(planForward, reinterpret_cast<fftwf_complex*>(tmp_nlf_m), reinterpret_cast<fftwf_complex*>(tmp_nlf_m)); fftwf_execute_dft(planForward, reinterpret_cast<fftwf_complex*>(tmp_adj_m), reinterpret_cast<fftwf_complex*>(tmp_adj_m)); // upgoing: zero the positive frequencies, excluding Nyquist // dngoing: zero the negative frequencies, excluding Nyquist #pragma omp simd for (long k = 1; k < nfft / 2; k++) { tmp_nlf_p[nfft / 2 + k] = 0; tmp_adj_p[kfft_adj + 
k] = 0; tmp_nlf_m[nfft / 2 + k] = 0; tmp_adj_m[kfft_adj + k] = 0; } fftwf_execute_dft(planInverse, reinterpret_cast<fftwf_complex*>(tmp_nlf_p), reinterpret_cast<fftwf_complex*>(tmp_nlf_p)); fftwf_execute_dft(planInverse, reinterpret_cast<fftwf_complex*>(tmp_adj_p), reinterpret_cast<fftwf_complex*>(tmp_adj_p)); fftwf_execute_dft(planInverse, reinterpret_cast<fftwf_complex*>(tmp_nlf_m), reinterpret_cast<fftwf_complex*>(tmp_nlf_m)); fftwf_execute_dft(planInverse, reinterpret_cast<fftwf_complex*>(tmp_adj_m), reinterpret_cast<fftwf_complex*>(tmp_adj_m)); // Faqi eq 10 // Applied to FWI: [Sup * Rdn] // Applied to RTM: [Sup * Rup] #pragma omp simd for (long kz = 0; kz < _nz; kz++) { const long k = kx * _nz + kz; const float V = _v[k]; const float B = _b[k]; const float factor = 2 * B / (V * V * V); dVel[k] += factor * (real(tmp_nlf_p[kz] * tmp_adj_p[kz]) + real(tmp_nlf_m[kz] * tmp_adj_m[kz])); } } // end loop over kx } // end loop over bx delete [] tmp_nlf_p; delete [] tmp_adj_p; delete [] tmp_nlf_m; delete [] tmp_adj_m; } // end parallel region fftwf_destroy_plan(planForward); fftwf_destroy_plan(planInverse); } #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void adjointBornAccumulation_VEA(float *dVel, float *dEps, float *dEta, float *wavefieldP, float *wavefieldM, float *wavefieldDP, float *wavefieldDM) { // Right side spatial derivatives for the adjoint accumulation applyFirstDerivatives2D_PlusHalf( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, wavefieldP, wavefieldP, _tmpPx1, _tmpPz1, _nbx, _nbz); applyFirstDerivatives2D_PlusHalf( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, wavefieldM, wavefieldM, _tmpMx1, _tmpMz1, _nbx, _nbz); applyFirstDerivatives2D_PlusHalf( _freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _pOld, _pOld, _tmpPx2, _tmpPz2, _nbx, _nbz); applyFirstDerivatives2D_PlusHalf( _freeSurface, _nx, _nz, 
_nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _mOld, _mOld, _tmpMx2, _tmpMz2, _nbx, _nbz); // Sandwich terms for the adjoint accumulation #pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, _nx); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _nz + kz; const float V = _v[k]; const float E = _eps[k]; const float A = _eta[k]; const float B = _b[k]; const float F = _f[k]; const float factor = 2 * B / (V * V * V); dVel[k] += factor * wavefieldDP[k] * _pOld[k] + factor * wavefieldDM[k] * _mOld[k]; dEps[k] += -2 * B * _tmpPx1[k] * _tmpPx2[k]; const float partP = 2 * B * F * A * _tmpPz1[k] - (B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpMz1[k]; const float partM = 2 * B * F * A * _tmpMz1[k] + (B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpPz1[k]; dEta[k] += partP * _tmpPz2[k] - partM * _tmpMz2[k]; } } } } } template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline static void applyFirstDerivatives2D_PlusHalf_Sandwich( const long freeSurface, const long nx, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDz, const Type * __restrict__ const inPX, const Type * __restrict__ const inPZ, const Type * __restrict__ const inMX, const Type * __restrict__ const inMZ, const Type * __restrict__ const fieldEps, const Type * __restrict__ const fieldEta, const Type * __restrict__ const fieldVsVp, const Type * __restrict__ const fieldBuoy, Type * __restrict__ tmpPX, Type * __restrict__ tmpPZ, Type * __restrict__ tmpMX, Type * __restrict__ tmpMZ, const long BX_2D, const long BZ_2D) { const long nx4 = nx - 4; const long nz4 = nz - 4; // zero output arrays #pragma omp 
parallel for collapse(2) num_threads(nthread) schedule(static) for (long bx = 0; bx < nx; bx += BX_2D) { for (long bz = 0; bz < nz; bz += BZ_2D) { const long kxmax = MIN(bx + BX_2D, nx); const long kzmax = MIN(bz + BZ_2D, nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { long k = kx * nz + kz; tmpPX[k] = 0; tmpPZ[k] = 0; tmpMX[k] = 0; tmpMZ[k] = 0; } } } } // interior #pragma omp parallel for collapse(2) num_threads(nthread) schedule(guided) for (long bx = 4; bx < nx4; bx += BX_2D) { for (long bz = 4; bz < nz4; bz += BZ_2D) { /* cache blocking */ const long kxmax = MIN(bx + BX_2D, nx4); const long kzmax = MIN(bz + BZ_2D, nz4); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long kxnz = kx * nz; const long k = kxnz + kz; const Type stencilDPx = c8_1 * (- inPX[(kx+0) * nz + kz] + inPX[(kx+1) * nz + kz]) + c8_2 * (- inPX[(kx-1) * nz + kz] + inPX[(kx+2) * nz + kz]) + c8_3 * (- inPX[(kx-2) * nz + kz] + inPX[(kx+3) * nz + kz]) + c8_4 * (- inPX[(kx-3) * nz + kz] + inPX[(kx+4) * nz + kz]); const Type stencilDPz = c8_1 * (- inPZ[kxnz + (kz+0)] + inPZ[kxnz + (kz+1)]) + c8_2 * (- inPZ[kxnz + (kz-1)] + inPZ[kxnz + (kz+2)]) + c8_3 * (- inPZ[kxnz + (kz-2)] + inPZ[kxnz + (kz+3)]) + c8_4 * (- inPZ[kxnz + (kz-3)] + inPZ[kxnz + (kz+4)]); const Type stencilDMx = c8_1 * (- inMX[(kx+0) * nz + kz] + inMX[(kx+1) * nz + kz]) + c8_2 * (- inMX[(kx-1) * nz + kz] + inMX[(kx+2) * nz + kz]) + c8_3 * (- inMX[(kx-2) * nz + kz] + inMX[(kx+3) * nz + kz]) + c8_4 * (- inMX[(kx-3) * nz + kz] + inMX[(kx+4) * nz + kz]); const Type stencilDMz = c8_1 * (- inMZ[kxnz + (kz+0)] + inMZ[kxnz + (kz+1)]) + c8_2 * (- inMZ[kxnz + (kz-1)] + inMZ[kxnz + (kz+2)]) + c8_3 * (- inMZ[kxnz + (kz-2)] + inMZ[kxnz + (kz+3)]) + c8_4 * (- inMZ[kxnz + (kz-3)] + inMZ[kxnz + (kz+4)]); const Type dPx = invDx * stencilDPx; const Type dPz = invDz * stencilDPz; const Type dMx = invDx * stencilDMx; const Type dMz = invDz * 
stencilDMz; const Type E = 1 + 2 * fieldEps[k]; const Type A = fieldEta[k]; const Type F = fieldVsVp[k]; const Type B = fieldBuoy[k]; tmpPX[k] = B * E * dPx; tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz; tmpMX[k] = B * (1 - F) * dMx; tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz; } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(guided) for (long kx = 4; kx < nx4; kx++) { // kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative // X and Y derivatives are identically zero // [kx * nz + 0] { const Type stencilDPz0 = c8_1 * (- inPZ[kx * nz + 0] + inPZ[kx * nz + 1]) + c8_2 * (+ inPZ[kx * nz + 1] + inPZ[kx * nz + 2]) + c8_3 * (+ inPZ[kx * nz + 2] + inPZ[kx * nz + 3]) + c8_4 * (+ inPZ[kx * nz + 3] + inPZ[kx * nz + 4]); const Type stencilDMz0 = c8_1 * (- inMZ[kx * nz + 0] + inMZ[kx * nz + 1]) + c8_2 * (+ inMZ[kx * nz + 1] + inMZ[kx * nz + 2]) + c8_3 * (+ inMZ[kx * nz + 2] + inMZ[kx * nz + 3]) + c8_4 * (+ inMZ[kx * nz + 3] + inMZ[kx * nz + 4]); const Type dPx = 0; const Type dPz = invDz * stencilDPz0; const Type dMx = 0; const Type dMz = invDz * stencilDMz0; const long k = kx * nz + 0; const Type E = 1 + 2 * fieldEps[k]; const Type A = fieldEta[k]; const Type F = fieldVsVp[k]; const Type B = fieldBuoy[k]; tmpPX[k] = B * E * dPx; tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz; tmpMX[k] = B * (1 - F) * dMx; tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz; } // kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cells below for X/Y derivative // [kx * nz + 1] { const Type stencilDPx1 = c8_1 * (- inPX[(kx+0) * nz + 1] + inPX[(kx+1) * nz + 1]) + c8_2 * (- inPX[(kx-1) * nz + 1] + inPX[(kx+2) * nz + 1]) + c8_3 * (- inPX[(kx-2) * nz + 1] + inPX[(kx+3) * nz + 1]) + c8_4 * (- inPX[(kx-3) * nz + 1] + inPX[(kx+4) * nz + 1]); const Type stencilDPz1 = c8_1 * (- inPZ[kx * nz 
+ 1] + inPZ[kx * nz + 2]) + c8_2 * (- inPZ[kx * nz + 0] + inPZ[kx * nz + 3]) + c8_3 * (+ inPZ[kx * nz + 1] + inPZ[kx * nz + 4]) + c8_4 * (+ inPZ[kx * nz + 2] + inPZ[kx * nz + 5]); const Type stencilDMx1 = c8_1 * (- inMX[(kx+0) * nz + 1] + inMX[(kx+1) * nz + 1]) + c8_2 * (- inMX[(kx-1) * nz + 1] + inMX[(kx+2) * nz + 1]) + c8_3 * (- inMX[(kx-2) * nz + 1] + inMX[(kx+3) * nz + 1]) + c8_4 * (- inMX[(kx-3) * nz + 1] + inMX[(kx+4) * nz + 1]); const Type stencilDMz1 = c8_1 * (- inMZ[kx * nz + 1] + inMZ[kx * nz + 2]) + c8_2 * (- inMZ[kx * nz + 0] + inMZ[kx * nz + 3]) + c8_3 * (+ inMZ[kx * nz + 1] + inMZ[kx * nz + 4]) + c8_4 * (+ inMZ[kx * nz + 2] + inMZ[kx * nz + 5]); const Type dPx = invDx * stencilDPx1; const Type dPz = invDz * stencilDPz1; const Type dMx = invDx * stencilDMx1; const Type dMz = invDz * stencilDMz1; const long k = kx * nz + 1; const Type E = 1 + 2 * fieldEps[k]; const Type A = fieldEta[k]; const Type F = fieldVsVp[k]; const Type B = fieldBuoy[k]; tmpPX[k] = B * E * dPx; tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz; tmpMX[k] = B * (1 - F) * dMx; tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz; } // kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative // [kx * nz + 2] { const Type stencilDPx2 = c8_1 * (- inPX[(kx+0) * nz + 2] + inPX[(kx+1) * nz + 2]) + c8_2 * (- inPX[(kx-1) * nz + 2] + inPX[(kx+2) * nz + 2]) + c8_3 * (- inPX[(kx-2) * nz + 2] + inPX[(kx+3) * nz + 2]) + c8_4 * (- inPX[(kx-3) * nz + 2] + inPX[(kx+4) * nz + 2]); const Type stencilDPz2 = c8_1 * (- inPZ[kx * nz + 2] + inPZ[kx * nz + 3]) + c8_2 * (- inPZ[kx * nz + 1] + inPZ[kx * nz + 4]) + c8_3 * (- inPZ[kx * nz + 0] + inPZ[kx * nz + 5]) + c8_4 * (+ inPZ[kx * nz + 1] + inPZ[kx * nz + 6]); const Type stencilDMx2 = c8_1 * (- inMX[(kx+0) * nz + 2] + inMX[(kx+1) * nz + 2]) + c8_2 * (- inMX[(kx-1) * nz + 2] + inMX[(kx+2) * nz + 2]) + c8_3 * (- inMX[(kx-2) * nz + 2] + inMX[(kx+3) * nz + 2]) + c8_4 * (- 
inMX[(kx-3) * nz + 2] + inMX[(kx+4) * nz + 2]); const Type stencilDMz2 = c8_1 * (- inMZ[kx * nz + 2] + inMZ[kx * nz + 3]) + c8_2 * (- inMZ[kx * nz + 1] + inMZ[kx * nz + 4]) + c8_3 * (- inMZ[kx * nz + 0] + inMZ[kx * nz + 5]) + c8_4 * (+ inMZ[kx * nz + 1] + inMZ[kx * nz + 6]); const Type dPx = invDx * stencilDPx2; const Type dPz = invDz * stencilDPz2; const Type dMx = invDx * stencilDMx2; const Type dMz = invDz * stencilDMz2; const long k = kx * nz + 2; const Type E = 1 + 2 * fieldEps[k]; const Type A = fieldEta[k]; const Type F = fieldVsVp[k]; const Type B = fieldBuoy[k]; tmpPX[k] = B * E * dPx; tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz; tmpMX[k] = B * (1 - F) * dMx; tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz; } // kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative // [kx * nz + 3] { const Type stencilDPx3 = c8_1 * (- inPX[(kx+0) * nz + 3] + inPX[(kx+1) * nz + 3]) + c8_2 * (- inPX[(kx-1) * nz + 3] + inPX[(kx+2) * nz + 3]) + c8_3 * (- inPX[(kx-2) * nz + 3] + inPX[(kx+3) * nz + 3]) + c8_4 * (- inPX[(kx-3) * nz + 3] + inPX[(kx+4) * nz + 3]); const Type stencilDPz3 = c8_1 * (- inPZ[kx * nz + 3] + inPZ[kx * nz + 4]) + c8_2 * (- inPZ[kx * nz + 2] + inPZ[kx * nz + 5]) + c8_3 * (- inPZ[kx * nz + 1] + inPZ[kx * nz + 6]) + c8_4 * (- inPZ[kx * nz + 0] + inPZ[kx * nz + 7]); const Type stencilDMx3 = c8_1 * (- inMX[(kx+0) * nz + 3] + inMX[(kx+1) * nz + 3]) + c8_2 * (- inMX[(kx-1) * nz + 3] + inMX[(kx+2) * nz + 3]) + c8_3 * (- inMX[(kx-2) * nz + 3] + inMX[(kx+3) * nz + 3]) + c8_4 * (- inMX[(kx-3) * nz + 3] + inMX[(kx+4) * nz + 3]); const Type stencilDMz3 = c8_1 * (- inMZ[kx * nz + 3] + inMZ[kx * nz + 4]) + c8_2 * (- inMZ[kx * nz + 2] + inMZ[kx * nz + 5]) + c8_3 * (- inMZ[kx * nz + 1] + inMZ[kx * nz + 6]) + c8_4 * (- inMZ[kx * nz + 0] + inMZ[kx * nz + 7]); const Type dPx = invDx * stencilDPx3; const Type dPz = invDz * stencilDPz3; const Type dMx = invDx * stencilDMx3; const 
Type dMz = invDz * stencilDMz3;

                // NOTE(review): this leading fragment is the tail (kz = 3 free-surface row) of a
                // function whose beginning lies outside this chunk; left byte-identical.
                const long k = kx * nz + 3;

                // Anisotropy parameters at this cell (presumably Thomsen-style:
                // eps, eta, vs/vp ratio, buoyancy -- TODO confirm against field setup code)
                const Type E = 1 + 2 * fieldEps[k];
                const Type A = fieldEta[k];
                const Type F = fieldVsVp[k];
                const Type B = fieldBuoy[k];

                tmpPX[k] = B * E * dPx;
                tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz;
                tmpMX[k] = B * (1 - F) * dMx;
                tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz;
            }
        }
    }
}

/**
 * Combines
 *   applyFirstDerivatives_MinusHalf(P)
 *   secondOrderTimeUpdate_BubeConservation(P)
 *   applyFirstDerivatives_MinusHalf(M)
 *   secondOrderTimeUpdate_BubeConservation(M)
 *
 * Updates pOld and mOld with second order time update
 *
 * Nonlinear method: outputs the spatial derivatives for source wavefield serialization
 * Linear method: does not output the spatial derivatives
 *
 * Grid layout: flattened 2D array, index k = kx * nz + kz (z is the fast axis).
 * The 8th-order staggered first-derivative stencil uses coefficients c8_1..c8_4;
 * the 4 rows nearest the free surface (kz = 0..3) use one-sided/mirrored stencils.
 */
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives2D_MinusHalf_TimeUpdate_Nonlinear(
        const long freeSurface,
        const long nx,
        const long nz,
        const long nthread,
        const Type c8_1,
        const Type c8_2,
        const Type c8_3,
        const Type c8_4,
        const Type invDx,
        const Type invDz,
        const Type dtMod,
        const Type * __restrict__ const tmpPX,
        const Type * __restrict__ const tmpPZ,
        const Type * __restrict__ const tmpMX,
        const Type * __restrict__ const tmpMZ,
        const Type * __restrict__ const fieldVel,
        const Type * __restrict__ const fieldBuoy,
        const Type * __restrict__ const dtOmegaInvQ,
        Type * __restrict__ pCur,
        Type * __restrict__ mCur,
        Type * __restrict__ pSpace,
        Type * __restrict__ mSpace,
        Type * __restrict__ pOld,
        Type * __restrict__ mOld,
        const long BX_2D,
        const long BZ_2D) {

    const long nx4 = nx - 4;
    const long nz4 = nz - 4;
    const Type dt2 = dtMod * dtMod;   // squared (possibly modified) time step

    // zero output arrays
    // (pSpace/mSpace are fully overwritten only in the interior + free-surface rows,
    //  so the halo must be cleared explicitly before accumulation/serialization)
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(static)
    for (long bx = 0; bx < nx; bx += BX_2D) {
        for (long bz = 0; bz < nz; bz += BZ_2D) {
            const long kxmax = MIN(bx + BX_2D, nx);
            const long kzmax = MIN(bz + BZ_2D, nz);

            for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
                for (long kz = bz; kz < kzmax; kz++) {
                    long k = kx * nz + kz;
                    pSpace[k] = 0;
                    mSpace[k] = 0;
                }
            }
        }
    }

    // interior -- full centered 8th-order stencils, 4-point halo on each side
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(static)
    for (long bx = 4; bx < nx4; bx += BX_2D) {
        for (long bz = 4; bz < nz4; bz += BZ_2D) {
            /* cache blocking */
            const long kxmax = MIN(bx + BX_2D, nx4);
            const long kzmax = MIN(bz + BZ_2D, nz4);

            for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
                for (long kz = bz; kz < kzmax; kz++) {

                    // d/dx of tmpPX (minus-half staggering: samples at kx-4..kx+3)
                    const Type stencilDPx =
                            c8_1 * (- tmpPX[(kx-1) * nz + kz] + tmpPX[(kx+0) * nz + kz]) +
                            c8_2 * (- tmpPX[(kx-2) * nz + kz] + tmpPX[(kx+1) * nz + kz]) +
                            c8_3 * (- tmpPX[(kx-3) * nz + kz] + tmpPX[(kx+2) * nz + kz]) +
                            c8_4 * (- tmpPX[(kx-4) * nz + kz] + tmpPX[(kx+3) * nz + kz]);

                    // d/dz of tmpPZ
                    const Type stencilDPz =
                            c8_1 * (- tmpPZ[kx * nz + (kz-1)] + tmpPZ[kx * nz + (kz+0)]) +
                            c8_2 * (- tmpPZ[kx * nz + (kz-2)] + tmpPZ[kx * nz + (kz+1)]) +
                            c8_3 * (- tmpPZ[kx * nz + (kz-3)] + tmpPZ[kx * nz + (kz+2)]) +
                            c8_4 * (- tmpPZ[kx * nz + (kz-4)] + tmpPZ[kx * nz + (kz+3)]);

                    // d/dx of tmpMX
                    const Type stencilDMx =
                            c8_1 * (- tmpMX[(kx-1) * nz + kz] + tmpMX[(kx+0) * nz + kz]) +
                            c8_2 * (- tmpMX[(kx-2) * nz + kz] + tmpMX[(kx+1) * nz + kz]) +
                            c8_3 * (- tmpMX[(kx-3) * nz + kz] + tmpMX[(kx+2) * nz + kz]) +
                            c8_4 * (- tmpMX[(kx-4) * nz + kz] + tmpMX[(kx+3) * nz + kz]);

                    // d/dz of tmpMZ
                    const Type stencilDMz =
                            c8_1 * (- tmpMZ[kx * nz + (kz-1)] + tmpMZ[kx * nz + (kz+0)]) +
                            c8_2 * (- tmpMZ[kx * nz + (kz-2)] + tmpMZ[kx * nz + (kz+1)]) +
                            c8_3 * (- tmpMZ[kx * nz + (kz-3)] + tmpMZ[kx * nz + (kz+2)]) +
                            c8_4 * (- tmpMZ[kx * nz + (kz-4)] + tmpMZ[kx * nz + (kz+3)]);

                    const Type dPX = invDx * stencilDPx;
                    const Type dPZ = invDz * stencilDPz;
                    const Type dMX = invDx * stencilDMx;
                    const Type dMZ = invDz * stencilDMz;

                    const long k = kx * nz + kz;
                    // dt^2 * v^2 / buoyancy -- scale factor of the wave operator
                    const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                    // spatial Laplacian-like terms, serialized for the source wavefield
                    pSpace[k] = dPX + dPZ;
                    mSpace[k] = dMX + dMZ;

                    // second-order-in-time update with dtOmegaInvQ visco-acoustic damping
                    pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                    mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
                }
            }
        }
    }

    // roll on free surface -- rows kz = 0..3 use mirrored/one-sided z stencils
    if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
        for (long kx = 4; kx < nx4; kx++) {

            // kz = 0 -- at the free surface -- p = 0
            // [kx * nz + 0]
            {
                const Type dPX = 0;
                const Type dPZ = 0;
                const Type dMX = 0;
                const Type dMZ = 0;

                const long k = kx * nz + 0;
                const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                pSpace[k] = dPX + dPZ;
                mSpace[k] = dMX + dMZ;

                pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
            }

            // kz = 1 -- one cell below the free surface
            // [kx * nz + 1]
            {
                const Type stencilDPx1 =
                        c8_1 * (- tmpPX[(kx-1) * nz + 1] + tmpPX[(kx+0) * nz + 1]) +
                        c8_2 * (- tmpPX[(kx-2) * nz + 1] + tmpPX[(kx+1) * nz + 1]) +
                        c8_3 * (- tmpPX[(kx-3) * nz + 1] + tmpPX[(kx+2) * nz + 1]) +
                        c8_4 * (- tmpPX[(kx-4) * nz + 1] + tmpPX[(kx+3) * nz + 1]);

                // z stencil folded at the surface (samples that would be above kz=0 are mirrored)
                const Type stencilDPz1 =
                        c8_1 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 1]) +
                        c8_2 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 2]) +
                        c8_3 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 3]) +
                        c8_4 * (- tmpPZ[kx * nz + 2] + tmpPZ[kx * nz + 4]);

                const Type stencilDMx1 =
                        c8_1 * (- tmpMX[(kx-1) * nz + 1] + tmpMX[(kx+0) * nz + 1]) +
                        c8_2 * (- tmpMX[(kx-2) * nz + 1] + tmpMX[(kx+1) * nz + 1]) +
                        c8_3 * (- tmpMX[(kx-3) * nz + 1] + tmpMX[(kx+2) * nz + 1]) +
                        c8_4 * (- tmpMX[(kx-4) * nz + 1] + tmpMX[(kx+3) * nz + 1]);

                const Type stencilDMz1 =
                        c8_1 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 1]) +
                        c8_2 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 2]) +
                        c8_3 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 3]) +
                        c8_4 * (- tmpMZ[kx * nz + 2] + tmpMZ[kx * nz + 4]);

                const Type dPx = invDx * stencilDPx1;
                const Type dPz = invDz * stencilDPz1;
                const Type dMx = invDx * stencilDMx1;
                const Type dMz = invDz * stencilDMz1;

                const long k = kx * nz + 1;
                const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                pSpace[k] = dPx + dPz;
                mSpace[k] = dMx + dMz;

                pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
            }

            // kz = 2 -- two cells below the free surface
            // [kx * nz + 2]
            {
                const Type stencilDPx2 =
                        c8_1 * (- tmpPX[(kx-1) * nz + 2] + tmpPX[(kx+0) * nz + 2]) +
                        c8_2 * (- tmpPX[(kx-2) * nz + 2] + tmpPX[(kx+1) * nz + 2]) +
                        c8_3 * (- tmpPX[(kx-3) * nz + 2] + tmpPX[(kx+2) * nz + 2]) +
                        c8_4 * (- tmpPX[(kx-4) * nz + 2] + tmpPX[(kx+3) * nz + 2]);

                const Type stencilDPz2 =
                        c8_1 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 2]) +
                        c8_2 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 3]) +
                        c8_3 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 4]) +
                        c8_4 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 5]);

                const Type stencilDMx2 =
                        c8_1 * (- tmpMX[(kx-1) * nz + 2] + tmpMX[(kx+0) * nz + 2]) +
                        c8_2 * (- tmpMX[(kx-2) * nz + 2] + tmpMX[(kx+1) * nz + 2]) +
                        c8_3 * (- tmpMX[(kx-3) * nz + 2] + tmpMX[(kx+2) * nz + 2]) +
                        c8_4 * (- tmpMX[(kx-4) * nz + 2] + tmpMX[(kx+3) * nz + 2]);

                const Type stencilDMz2 =
                        c8_1 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 2]) +
                        c8_2 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 3]) +
                        c8_3 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 4]) +
                        c8_4 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 5]);

                const Type dPx = invDx * stencilDPx2;
                const Type dPz = invDz * stencilDPz2;
                const Type dMx = invDx * stencilDMx2;
                const Type dMz = invDz * stencilDMz2;

                const long k = kx * nz + 2;
                const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                pSpace[k] = dPx + dPz;
                mSpace[k] = dMx + dMz;

                pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
            }

            // kz = 3 -- three cells below the free surface
            // [kx * nz + 3]
            {
                const Type stencilDPx3 =
                        c8_1 * (- tmpPX[(kx-1) * nz + 3] + tmpPX[(kx+0) * nz + 3]) +
                        c8_2 * (- tmpPX[(kx-2) * nz + 3] + tmpPX[(kx+1) * nz + 3]) +
                        c8_3 * (- tmpPX[(kx-3) * nz + 3] + tmpPX[(kx+2) * nz + 3]) +
                        c8_4 * (- tmpPX[(kx-4) * nz + 3] + tmpPX[(kx+3) * nz + 3]);

                const Type stencilDPz3 =
                        c8_1 * (- tmpPZ[kx * nz + 2] + tmpPZ[kx * nz + 3]) +
                        c8_2 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 4]) +
                        c8_3 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 5]) +
                        c8_4 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 6]);

                const Type stencilDMx3 =
                        c8_1 * (- tmpMX[(kx-1) * nz + 3] + tmpMX[(kx+0) * nz + 3]) +
                        c8_2 * (- tmpMX[(kx-2) * nz + 3] + tmpMX[(kx+1) * nz + 3]) +
                        c8_3 * (- tmpMX[(kx-3) * nz + 3] + tmpMX[(kx+2) * nz + 3]) +
                        c8_4 * (- tmpMX[(kx-4) * nz + 3] + tmpMX[(kx+3) * nz + 3]);

                const Type stencilDMz3 =
                        c8_1 * (- tmpMZ[kx * nz + 2] + tmpMZ[kx * nz + 3]) +
                        c8_2 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 4]) +
                        c8_3 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 5]) +
                        c8_4 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 6]);

                const Type dPx = invDx * stencilDPx3;
                const Type dPz = invDz * stencilDPz3;
                const Type dMx = invDx * stencilDMx3;
                const Type dMz = invDz * stencilDMz3;

                const long k = kx * nz + 3;
                const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                pSpace[k] = dPx + dPz;
                mSpace[k] = dMx + dMz;

                pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
            }
        }
    }
}

/**
 * Linear variant of the combined MinusHalf-derivative + time update.
 * Identical numerics to the Nonlinear version, but does NOT write the
 * spatial-derivative arrays pSpace/mSpace (no source-wavefield serialization).
 */
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives2D_MinusHalf_TimeUpdate_Linear(
        const long freeSurface,
        const long nx,
        const long nz,
        const long nthread,
        const Type c8_1,
        const Type c8_2,
        const Type c8_3,
        const Type c8_4,
        const Type invDx,
        const Type invDz,
        const Type dtMod,
        const Type * __restrict__ const tmpPX,
        const Type * __restrict__ const tmpPZ,
        const Type * __restrict__ const tmpMX,
        const Type * __restrict__ const tmpMZ,
        const Type * __restrict__ const fieldVel,
        const Type * __restrict__ const fieldBuoy,
        const Type * __restrict__ const dtOmegaInvQ,
        Type * __restrict__ pCur,
        Type * __restrict__ mCur,
        Type * __restrict__ pOld,
        Type * __restrict__ mOld,
        const long BX_2D,
        const long BZ_2D) {

    const long nx4 = nx - 4;
    const long nz4 = nz - 4;
    const Type dt2 = dtMod * dtMod;   // squared (possibly modified) time step

    // interior -- full centered 8th-order stencils
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(static)
    for (long bx = 4; bx < nx4; bx += BX_2D) {
        for (long bz = 4; bz < nz4; bz += BZ_2D) {
            /* cache blocking */
            const long kxmax = MIN(bx + BX_2D, nx4);
            const long kzmax = MIN(bz + BZ_2D, nz4);

            for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
                for (long kz = bz; kz < kzmax; kz++) {
                    const long kxnz = kx * nz;   // hoisted row offset
                    const long k = kxnz + kz;

                    const Type stencilDPx =
                            c8_1 * (- tmpPX[(kx-1) * nz + kz] + tmpPX[(kx+0) * nz + kz]) +
                            c8_2 * (- tmpPX[(kx-2) * nz + kz] + tmpPX[(kx+1) * nz + kz]) +
                            c8_3 * (- tmpPX[(kx-3) * nz + kz] + tmpPX[(kx+2) * nz + kz]) +
                            c8_4 * (- tmpPX[(kx-4) * nz + kz] + tmpPX[(kx+3) * nz + kz]);

                    const Type stencilDPz =
                            c8_1 * (- tmpPZ[kxnz + (kz-1)] + tmpPZ[kxnz + (kz+0)]) +
                            c8_2 * (- tmpPZ[kxnz + (kz-2)] + tmpPZ[kxnz + (kz+1)]) +
                            c8_3 * (- tmpPZ[kxnz + (kz-3)] + tmpPZ[kxnz + (kz+2)]) +
                            c8_4 * (- tmpPZ[kxnz + (kz-4)] + tmpPZ[kxnz + (kz+3)]);

                    const Type stencilDMx =
                            c8_1 * (- tmpMX[(kx-1) * nz + kz] + tmpMX[(kx+0) * nz + kz]) +
                            c8_2 * (- tmpMX[(kx-2) * nz + kz] + tmpMX[(kx+1) * nz + kz]) +
                            c8_3 * (- tmpMX[(kx-3) * nz + kz] + tmpMX[(kx+2) * nz + kz]) +
                            c8_4 * (- tmpMX[(kx-4) * nz + kz] + tmpMX[(kx+3) * nz + kz]);

                    const Type stencilDMz =
                            c8_1 * (- tmpMZ[kxnz + (kz-1)] + tmpMZ[kxnz + (kz+0)]) +
                            c8_2 * (- tmpMZ[kxnz + (kz-2)] + tmpMZ[kxnz + (kz+1)]) +
                            c8_3 * (- tmpMZ[kxnz + (kz-3)] + tmpMZ[kxnz + (kz+2)]) +
                            c8_4 * (- tmpMZ[kxnz + (kz-4)] + tmpMZ[kxnz + (kz+3)]);

                    const Type dPx = invDx * stencilDPx;
                    const Type dPz = invDz * stencilDPz;
                    const Type dMx = invDx * stencilDMx;
                    const Type dMz = invDz * stencilDMz;

                    const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                    pOld[k] = dt2V2_B * (dPx + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                    mOld[k] = dt2V2_B * (dMx + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
                }
            }
        }
    }

    // roll on free surface -- rows kz = 0..3 use mirrored/one-sided z stencils
    if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
        for (long kx = 4; kx < nx4; kx++) {

            // kz = 0 -- at the free surface -- p = 0
            // [kx * nz + 0]
            {
                const Type dPX = 0;
                const Type dPZ = 0;
                const Type dMX = 0;
                const Type dMZ = 0;

                const long k = kx * nz + 0;
                const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                pOld[k] = dt2V2_B * (dPX + dPZ) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                mOld[k] = dt2V2_B * (dMX + dMZ) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
            }

            // kz = 1 -- one cell below the free surface
            // [kx * nz + 1]
            {
                const Type stencilDPx1 =
                        c8_1 * (- tmpPX[(kx-1) * nz + 1] + tmpPX[(kx+0) * nz + 1]) +
                        c8_2 * (- tmpPX[(kx-2) * nz + 1] + tmpPX[(kx+1) * nz + 1]) +
                        c8_3 * (- tmpPX[(kx-3) * nz + 1] + tmpPX[(kx+2) * nz + 1]) +
                        c8_4 * (- tmpPX[(kx-4) * nz + 1] + tmpPX[(kx+3) * nz + 1]);

                const Type stencilDPz1 =
                        c8_1 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 1]) +
                        c8_2 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 2]) +
                        c8_3 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 3]) +
                        c8_4 * (- tmpPZ[kx * nz + 2] + tmpPZ[kx * nz + 4]);

                const Type stencilDMx1 =
                        c8_1 * (- tmpMX[(kx-1) * nz + 1] + tmpMX[(kx+0) * nz + 1]) +
                        c8_2 * (- tmpMX[(kx-2) * nz + 1] + tmpMX[(kx+1) * nz + 1]) +
                        c8_3 * (- tmpMX[(kx-3) * nz + 1] + tmpMX[(kx+2) * nz + 1]) +
                        c8_4 * (- tmpMX[(kx-4) * nz + 1] + tmpMX[(kx+3) * nz + 1]);

                const Type stencilDMz1 =
                        c8_1 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 1]) +
                        c8_2 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 2]) +
                        c8_3 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 3]) +
                        c8_4 * (- tmpMZ[kx * nz + 2] + tmpMZ[kx * nz + 4]);

                const Type dPx = invDx * stencilDPx1;
                const Type dPz = invDz * stencilDPz1;
                const Type dMx = invDx * stencilDMx1;
                const Type dMz = invDz * stencilDMz1;

                const long k = kx * nz + 1;
                const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                pOld[k] = dt2V2_B * (dPx + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                mOld[k] = dt2V2_B * (dMx + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
            }

            // kz = 2 -- two cells below the free surface
            // [kx * nz + 2]
            {
                const Type stencilDPx2 =
                        c8_1 * (- tmpPX[(kx-1) * nz + 2] + tmpPX[(kx+0) * nz + 2]) +
                        c8_2 * (- tmpPX[(kx-2) * nz + 2] + tmpPX[(kx+1) * nz + 2]) +
                        c8_3 * (- tmpPX[(kx-3) * nz + 2] + tmpPX[(kx+2) * nz + 2]) +
                        c8_4 * (- tmpPX[(kx-4) * nz + 2] + tmpPX[(kx+3) * nz + 2]);

                const Type stencilDPz2 =
                        c8_1 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 2]) +
                        c8_2 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 3]) +
                        c8_3 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 4]) +
                        c8_4 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 5]);

                const Type stencilDMx2 =
                        c8_1 * (- tmpMX[(kx-1) * nz + 2] + tmpMX[(kx+0) * nz + 2]) +
                        c8_2 * (- tmpMX[(kx-2) * nz + 2] + tmpMX[(kx+1) * nz + 2]) +
                        c8_3 * (- tmpMX[(kx-3) * nz + 2] + tmpMX[(kx+2) * nz + 2]) +
                        c8_4 * (- tmpMX[(kx-4) * nz + 2] + tmpMX[(kx+3) * nz + 2]);

                const Type stencilDMz2 =
                        c8_1 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 2]) +
                        c8_2 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 3]) +
                        c8_3 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 4]) +
                        c8_4 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 5]);

                const Type dPx = invDx * stencilDPx2;
                const Type dPz = invDz * stencilDPz2;
                const Type dMx = invDx * stencilDMx2;
                const Type dMz = invDz * stencilDMz2;

                const long k = kx * nz + 2;
                const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                pOld[k] = dt2V2_B * (dPx + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                mOld[k] = dt2V2_B * (dMx + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
            }

            // kz = 3 -- three cells below the free surface
            // [kx * nz + 3]
            {
                const Type stencilDPx3 =
                        c8_1 * (- tmpPX[(kx-1) * nz + 3] + tmpPX[(kx+0) * nz + 3]) +
                        c8_2 * (- tmpPX[(kx-2) * nz + 3] + tmpPX[(kx+1) * nz + 3]) +
                        c8_3 * (- tmpPX[(kx-3) * nz + 3] + tmpPX[(kx+2) * nz + 3]) +
                        c8_4 * (- tmpPX[(kx-4) * nz + 3] + tmpPX[(kx+3) * nz + 3]);

                const Type stencilDPz3 =
                        c8_1 * (- tmpPZ[kx * nz + 2] + tmpPZ[kx * nz + 3]) +
                        c8_2 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 4]) +
                        c8_3 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 5]) +
                        c8_4 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 6]);

                const Type stencilDMx3 =
                        c8_1 * (- tmpMX[(kx-1) * nz + 3] + tmpMX[(kx+0) * nz + 3]) +
                        c8_2 * (- tmpMX[(kx-2) * nz + 3] + tmpMX[(kx+1) * nz + 3]) +
                        c8_3 * (- tmpMX[(kx-3) * nz + 3] + tmpMX[(kx+2) * nz + 3]) +
                        c8_4 * (- tmpMX[(kx-4) * nz + 3] + tmpMX[(kx+3) * nz + 3]);

                const Type stencilDMz3 =
                        c8_1 * (- tmpMZ[kx * nz + 2] + tmpMZ[kx * nz + 3]) +
                        c8_2 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 4]) +
                        c8_3 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 5]) +
                        c8_4 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 6]);

                const Type dPx = invDx * stencilDPx3;
                const Type dPz = invDz * stencilDPz3;
                const Type dMx = invDx * stencilDMx3;
                const Type dMz = invDz * stencilDMz3;

                const long k = kx * nz + 3;
                const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];

                pOld[k] = dt2V2_B * (dPx + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
                mOld[k] = dt2V2_B * (dMx + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
            }
        }
    }
};

#endif
GB_unaryop__minv_uint16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): kept byte-identical except for comments, per the notice above.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint16_fp32
// op(A') function:  GB_tran__minv_uint16_fp32

// C type:   uint16_t
// A type:   float
// cast:     uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 16)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (unsigned 16-bit modular multiplicative inverse)
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 16) ;

// casting (float -> uint16_t with saturation/truncation per GB_CAST_UNSIGNED)
#define GB_CASTING(z, x) \
    uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint16_fp32
(
    uint16_t *restrict Cx,        // output array, length anz
    const float *restrict Ax,     // input array, length anz
    int64_t anz,                  // number of entries
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise: independent iterations, safe to parallelize statically
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included, parameterized by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
CFrameRGBD.h
/* * Photoconsistency-Visual-Odometry * Multiscale Photoconsistency Visual Odometry from RGBD Images * Copyright (c) 2012, Miguel Algaba Borrego * * http://code.google.com/p/photoconsistency-visual-odometry/ * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the holder(s) nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #ifndef CFRAME_RGBD #define CFRAME_RGBD #define ENABLE_POINTCLOUD_DOWNSAMPLER 1 #define ENABLE_OPENMP_MULTITHREADING_FRAMERGBD 0 //Enables multithreading for CFrameRGBD #if ENABLE_POINTCLOUD_DOWNSAMPLER #include "PointCloudDownsampler.h" #else #include <pcl/filters/voxel_grid.h> #endif #include <pcl/point_cloud.h> #include <pcl/point_types.h> #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include <fstream> #include <boost/lexical_cast.hpp> #include "cvmat_serialization.h" #include <boost/archive/binary_oarchive.hpp> #include <boost/archive/binary_iarchive.hpp> #include <fstream> /*! The class CFrameRGBD encapsulates the RGB and depht data of a certain RGBD frame. It also contains the timestamp of the RGBD data. */ class CFrameRGBD { private: /*!RGB image*/ cv::Mat m_rgbImage; /*!Intensity image (grayscale version of the RGB image)*/ cv::Mat m_intensityImage; /*!Depth image*/ cv::Mat m_depthImage; /*!Coloured point cloud*/ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr m_pointCloudPtr; /*!Downsampled coloured point cloud*/ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr m_downsampledPointCloudPtr; /*!True if the coloured point cloud is available, false otherwise*/ bool pointCloudAvailable; /*!True if the downsampled coloured point cloud is available, false otherwise*/ bool downsampledPointCloudAvailable; /*!True if the intensity image is available, false otherwise*/ bool intensityImageAvailable; /*!Timestamp of the RGBD frame*/ uint64_t m_timeStamp; /*!Max pointcloud depth*/ float maxDepth; /*!Min pointcloud depth*/ float minDepth; public: CFrameRGBD() { pointCloudAvailable=false; downsampledPointCloudAvailable=false; intensityImageAvailable=false; minDepth = 0.3; // Default min depth maxDepth = 5.0; // Default max depth }; ~CFrameRGBD(){}; /*!Sets a RGB image to the RGBD frame.*/ inline void setRGBImage(const cv::Mat & rgbImage){m_rgbImage = rgbImage;} /*!Returns the RGB image of the RGBD frame.*/ inline cv::Mat & getRGBImage(){return 
m_rgbImage;} /*!Sets a depth image to the RGBD frame.*/ inline void setDepthImage(const cv::Mat & depthImage){m_depthImage = depthImage;} /*!Returns the depth image of the RGBD frame.*/ inline cv::Mat & getDepthImage(){return m_depthImage;} /*!Set the RGBD frame timestamp*/ inline void setTimeStamp(uint64_t timeStamp){m_timeStamp=timeStamp;}; /*!Returns the RGBD frame timestamp*/ inline uint64_t getTimeStamp(){return m_timeStamp;}; /*!Gets a grayscale version of the RGB image*/ inline cv::Mat & getIntensityImage() { //If the intensity image has been already computed, don't compute it again if(!intensityImageAvailable) { cv::cvtColor(m_rgbImage,m_intensityImage,CV_BGR2GRAY); //The intensity image is now available intensityImageAvailable = true; } return m_intensityImage; } /*!Sets the max depth value for the point cloud points.*/ inline void setMaxPointCloudDepth(float maxD) { maxDepth = maxD; } /*!Sets the min depth value for the point cloud points.*/ inline void setMinPointCloudDepth(float minD) { minDepth = minD; } /*!Returns the max depth value for point cloud points.*/ inline float getMaxPointCloudDepth() { return maxDepth; } /*!Returns the min depth value for point cloud points.*/ inline float getMinPointCloudDepth() { return minDepth; } /*!Gets a 3D coloured point cloud from the RGBD data using the camera parameters*/ inline pcl::PointCloud<pcl::PointXYZRGBA>::Ptr getPointCloud(const Eigen::Matrix3f & cameraMatrix) { //If the point cloud has been already computed, don't compute it again if(!pointCloudAvailable) { const float inv_fx = 1.f/cameraMatrix(0,0); const float inv_fy = 1.f/cameraMatrix(1,1); const float ox = cameraMatrix(0,2); const float oy = cameraMatrix(1,2); int height = m_rgbImage.rows; int width = m_rgbImage.cols; m_pointCloudPtr.reset(new pcl::PointCloud<pcl::PointXYZRGBA>()); m_pointCloudPtr->height = height; m_pointCloudPtr->width = width; m_pointCloudPtr->is_dense = false; m_pointCloudPtr->points.resize(height*width); #if 
ENABLE_OPENMP_MULTITHREADING_FRAMERGBD #pragma omp parallel for #endif for( int y = 0; y < height; y++ ) { for( int x = 0; x < width; x++ ) { float z = m_depthImage.at<float>(y,x); //convert from milimeters to meters if(z>0 && z>=minDepth && z<=maxDepth) //If the point has valid depth information assign the 3D point to the point cloud { m_pointCloudPtr->points[width*y+x].x = z; m_pointCloudPtr->points[width*y+x].y = -(x - ox) * z * inv_fx; m_pointCloudPtr->points[width*y+x].z = -(y - oy) * z * inv_fy; cv::Vec3b& bgr = m_rgbImage.at<cv::Vec3b>(y,x); m_pointCloudPtr->points[width*y+x].r = bgr[2]; m_pointCloudPtr->points[width*y+x].g = bgr[1]; m_pointCloudPtr->points[width*y+x].b = bgr[0]; } else //else, assign a NAN value { m_pointCloudPtr->points[width*y+x].x = std::numeric_limits<float>::quiet_NaN (); m_pointCloudPtr->points[width*y+x].y = std::numeric_limits<float>::quiet_NaN (); m_pointCloudPtr->points[width*y+x].z = std::numeric_limits<float>::quiet_NaN (); } } } //The point cloud is now available pointCloudAvailable = true; } return m_pointCloudPtr; } /*!Gets a downsampled version of the RGB point cloud*/ inline pcl::PointCloud<pcl::PointXYZRGBA>::Ptr getDownsampledPointCloud(const Eigen::Matrix3f & cameraMatrix) { //If the downsampled point cloud has been already computed, don't compute it again if(!downsampledPointCloudAvailable) { m_downsampledPointCloudPtr.reset(new pcl::PointCloud<pcl::PointXYZRGBA>()); #if ENABLE_POINTCLOUD_DOWNSAMPLER PointCloudDownsampler grid; grid = PointCloudDownsampler(8); grid.setMaximumDepth(maxDepth); grid.setMinimumDepth(minDepth); grid.downsamplePointCloudColor(getPointCloud(cameraMatrix),m_downsampledPointCloudPtr); #else pcl::VoxelGrid<pcl::PointXYZRGBA> grid; grid.setLeafSize(0.04,0.04,0.04); grid.setFilterFieldName ("x"); grid.setFilterLimits (0.3,5.0); grid.setInputCloud (getPointCloud(cameraMatrix)); grid.filter(*m_downsampledPointCloudPtr); #endif //The downsampled point cloud is now available 
downsampledPointCloudAvailable = true; } return m_downsampledPointCloudPtr; } void getMatrixNumberRepresentationOf_uint64_t(uint64_t number,cv::Mat & matrixNumberRepresentation) { //Determine the number of digits of the number int num_digits = 0; uint64_t number_aux = number; while(number_aux > 0) { num_digits++; number_aux/=10; } //Compute the matrix representation of the number matrixNumberRepresentation = cv::Mat::zeros(1,num_digits,CV_8U); uint64_t remainder = number; for(int digitIndex=0;digitIndex<num_digits;digitIndex++) { if(remainder==0){break;} uint8_t greaterDigit; uint64_t dividend = remainder; uint64_t divisor = pow(10,num_digits-1-digitIndex); uint64_t quotient = remainder / divisor; greaterDigit = quotient; matrixNumberRepresentation.at<uint8_t>(0,digitIndex)=greaterDigit; remainder = dividend - divisor * quotient; } } void saveToFile(std::string fileName) { std::ofstream ofs(fileName.append(".bin").c_str(), std::ios::out | std::ios::binary); { // use scope to ensure archive goes out of scope before stream boost::archive::binary_oarchive oa(ofs); cv::Mat timeStampMatrix; getMatrixNumberRepresentationOf_uint64_t(m_timeStamp,timeStampMatrix); oa << m_depthImage << m_rgbImage << timeStampMatrix; } ofs.close(); } void get_uint64_t_ofMatrixRepresentation(cv::Mat & matrixNumberRepresentation,uint64_t & number) { int num_digits = matrixNumberRepresentation.cols; number=0; uint64_t power10=1; for(int digitIndex=num_digits-1;digitIndex>=0;digitIndex--) { number += power10 * uint64_t(matrixNumberRepresentation.at<uint8_t>(0,digitIndex)); power10 = power10 * 10; } } void loadFromFile(std::string fileName) { std::ifstream ifs(fileName.append(".bin").c_str(), std::ios::in | std::ios::binary); { // use scope to ensure archive goes out of scope before stream boost::archive::binary_iarchive ia(ifs); cv::Mat timeStampMatrix; ia >> m_depthImage >> m_rgbImage >> timeStampMatrix; get_uint64_t_ofMatrixRepresentation(timeStampMatrix,m_timeStamp); } ifs.close(); 
//Initialize the intensity image with an empty matrix m_intensityImage = cv::Mat(); intensityImageAvailable = false; //The intensity image is initially unavailable //Initialize the point cloud with an empty pointer m_pointCloudPtr.reset(new pcl::PointCloud<pcl::PointXYZRGBA>()); pointCloudAvailable = false; //The point cloud is initially unavailable //Initialize the downsampled point cloud with an empty pointer m_downsampledPointCloudPtr.reset(new pcl::PointCloud<pcl::PointXYZRGBA>()); downsampledPointCloudAvailable = false; //The downsampled point cloud is initially unavailable } }; #endif
8580.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ for (i = 0; i < _PB_N; i++) { #pragma omp target teams distribute schedule(dynamic, 14) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp target teams distribute schedule(dynamic, 14) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
ten_tusscher_2004_RS_CPU_epi_Test.c
//Original Ten Tusscher
// CPU implementation of the ten Tusscher 2004 human ventricular cell model
// (epicardial variant), with per-scenario tuned initial conditions and
// conductance parameter sets.  17 state variables (V plus 16 gates/ions).
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_Test.h"

// Report the model's resting potential and state-vector size to the solver
// framework; each field is filled only when the corresponding flag is set.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;

}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Loads a precomputed steady-state vector into sv[0..16].  Only "Scenario 2"
// of the second group is active; the other scenario vectors are kept
// commented out for quick switching during experiments.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    //initial condition
    //Scenario 2
    // real sv11[]={-86.7599490237245,0.00123831208622928,0.784376608695859,0.784218467628080,0.000170016808347696,0.487085364989106,0.00290043259117021,0.999998410220405,1.87270147822737e-08,1.84334654710491e-05,0.999776444937499,1.00727320017378,0.999997421410314,4.09813553215966e-05,1.00091265418338,9.36478320062292,139.974256946572};

    //Scenario 3
    //real sv11[]={-86.6832615134402,0.00125876883400146,0.782519885686078,0.782385890597164,0.000171886605918564,0.486287153523371,0.00291631476093424,0.999998385692801,1.89678233086951e-08,1.86229043360926e-05,0.999783587315930,1.00721445029128,0.999996850289244,4.23696052205578e-05,0.487079901995765,10.1298949658907,139.478138182002};

    //Scenario 4
    //real sv11[]={-86.7531659359261,0.00124010826721524,0.784213090011930,0.784063751337305,0.000170184867440439,0.487014769904825,0.00290183337641837,0.999998408105558,1.87481748650298e-08,1.84501422061852e-05,0.999773598689194,1.00768875506436,0.999999512997626,3.10350472687116e-05,1.04650592961489,10.1580626436712,139.167353745914};

    //Scenario4_1_106_pop76
    //real sv11[]={-86.6337556349546,0.00127215057254844,0.781315329700828,0.781192702879389,0.000173232959601247,0.485771934772721,0.00292661184320977,0.999998369627955,1.91248713554218e-08,1.87462257542883e-05,0.999765973534775,1.00688195901693,0.999991331074147,5.01588072510622e-05,0.719318246052902,9.82154696449291,139.637347751159};

    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    ///initial condition
    //Scenario 2 (ACTIVE)
    real sv11[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392};

    //Scenario 3
    //real sv11[]={-86.6902768323595,0.00125688376225555,0.782690257165761,0.782547892596001,0.000171750048746746,0.486360170563085,0.00291485827479809,0.999998387931464,1.89456679295569e-08,1.86054940017131e-05,0.999770742626069,1.00724037170339,0.999997113579370,4.17567836043613e-05,0.472458747863693,10.1478189383772,139.471917130272};

    //Scenario4
    //real sv11[]={-86.7596599603487,0.00123838857632763,0.784369818846026,0.784223148947282,0.000169972136689011,0.487082365294413,0.00290049182352458,0.999998410215409,1.87279005544269e-08,1.84341746908718e-05,0.999781004659642,1.00771223118124,0.999999564103621,3.04673432492567e-05,0.993358298469861,10.1763606222150,139.168522102236};

    //Scenario4_1
    //real sv11[]={-86.6404915792850,0.00127032163211322,0.781479753157976,0.781360816517016,0.000172969600594225,0.485842045427499,0.00292520813217015,0.999998371823369,1.91034113695031e-08,1.87293970187045e-05,0.999771221267447,1.00691525856031,0.999992103392003,4.93846276389813e-05,0.695256716079829,9.83880114557068,139.633017313049};

    // Copy the chosen steady state into the solver's state vector.
    sv[0] = sv11[0]; // V; millivolt
    sv[1] = sv11[1]; //M
    sv[2] = sv11[2]; //H
    sv[3] = sv11[3]; //J
    sv[4] = sv11[4]; //Xr1
    sv[5] = sv11[5]; //Xr2
    sv[6] = sv11[6]; //Xs
    sv[7] = sv11[7]; //S
    sv[8] = sv11[8]; //R
    sv[9] = sv11[9]; //D
    sv[10] = sv11[10]; //F
    sv[11] = sv11[11]; //FCa
    sv[12] = sv11[12]; //G
    sv[13] = sv11[13]; //Cai
    sv[14] = sv11[14]; //CaSR
    sv[15] = sv11[15]; //Nai
    sv[16] = sv11[16]; //Ki

    // Default (paper) initial conditions, kept for reference:
    // sv[0] = INITIAL_V; // V; millivolt
    // sv[1] = 0.f; //M
    // sv[2] = 0.75; //H
    // sv[3] = 0.75f; //J
    // sv[4] = 0.f; //Xr1
    // sv[5] = 1.f; //Xr2
    // sv[6] = 0.f; //Xs
    // sv[7] = 1.f; //S
    // sv[8] = 0.f; //R
    // sv[9] = 0.f; //D
    // sv[10] = 1.f; //F
    // sv[11] = 1.f; //FCa
    // sv[12] = 1.f; //G
    // sv[13] = 0.0002; //Cai
    // sv[14] = 0.2f; //CaSR
    // sv[15] = 11.6f; //Nai
    // sv[16] = 138.3f; //Ki
}

// Advances every requested cell by num_steps explicit sub-steps of size dt.
// cells_to_solve==NULL means "solve cells 0..num_cells_to_solve-1".
// Cells are independent, so the outer loop is parallelized with OpenMP.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;

    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a single cell.  Note: RHS_cpu returns the UPDATED state
// (not derivatives) in rDY, so it is copied straight back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Evaluates one integration step of the ten Tusscher 2004 model.
// Despite the name, rDY_ receives the NEW state values: gates are advanced
// with an exponential (Rush–Larsen-style) update, the membrane voltage with
// forward Euler, and Cai/CaSR with an implicit buffered update.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    // Per-scenario conductance/flux parameter sets (only Scenario 2 active);
    // order: GNa,GbNa,GCaL,GbCa,Gto,Gkr,Gks,GK1,GpK,knak,knaca,Vmaxup,GpCa,arel,crel,Vleak
    ///Scenario 2:
    real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05};
    ///Scenario 3:
    //real parameters []={14.2265776064284,0.000280045021984329,0.000123702304592752,0.000251556675811958,0.224623739779267,0.145045477736859,0.132102752427711,4.42712254301024,0.0156948843567210,1.61691730440283,1100,0.000520888772463349,0.258756467150201,0.0191544497099730,0.00137164828832637,4.52996729499983e-05};
    ///Scenario 4:
    //real parameters []={14.6970262149558,2.32527331724419e-05,0.000121747898718481,0.000276971880166082,0.210038991991875,0.120908114803453,0.200498466936257,5.12988959137240,0.0151231713364490,1.26415205898593,1083.02600285230,0.000542147164379904,0.160470068504854,0.0146070055973378,0.00183114105726186,1.00487709573505e-05};
    //Scenario4_1_106_pop76
    //real parameters []={14.4701107547473,0.000162061905578968,0.000188488521383406,0.000572929459830166,0.335244898151308,0.119541023695594,0.248924317567785,5.19603253018384,0.0221271053316735,2.03169412747953,1099.72574265209,0.000483122952800270,0.478907546954075,0.0199668557152203,0.00562797831559110,3.64128969863145e-05};

    real GNa=parameters[0];
    real GbNa=parameters[1];
    real GCaL=parameters[2];
    real GbCa=parameters[3];
    real Gto=parameters[4];
    real Gkr=parameters[5];
    real Gks=parameters[6];
    real GK1=parameters[7];
    real GpK=parameters[8];
    real knak=parameters[9];
    real knaca=parameters[10];
    real Vmaxup=parameters[11];
    real GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    /// real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    /// real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    /// real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    /// real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    /// real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    /// real GK1=5.405;
    //Parameters for Ito
    ///#ifdef EPI
    /// real Gto=0.294;
    ///#endif
    ///#ifdef ENDO
    /// real Gto=0.073;
    ///#endif
    ///#ifdef MCELL
    /// real Gto=0.294;
    ///#endif
    //Parameters for INa
    /// real GNa=14.838;
    //Parameters for IbNa
    /// real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    /// real knak=1.362;
    //Parameters for ICaL
    /// real GCaL=0.000175;
    //Parameters for IbCa
    /// real GbCa=0.000592;
    //Parameters for INaCa
    /// real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    /// real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    /// real GpK=0.0146;

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    // Precomputed exponential decay factors for the FCa and G gates.
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium: quadratic solve for the buffered concentration (calsequestrin).
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium: same quadratic buffering scheme.
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h and j gates use different rate formulas above/below -40 mV.
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Transient-outward gates differ per cell type (compile-time selection).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exact exponential step toward steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // FCa and G may only INCREASE while the membrane is depolarized
    // (above -37 mV): reject decreases by restoring the previous value.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler on the total ionic + stimulus current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
ex.c
#include <omp.h> #include <stdlib.h> //include header file #include "Accessor_c.h" int main(){ int * x = (int *)malloc(sizeof(int)*10); int * y = (int *)malloc(sizeof(int)*10); int * z = (int *)malloc(sizeof(int)*10); // declare a struct on the format accessor_<ACCESS_MODE>_<TYPE> accessor_read_int ac_x; accessor_read_int ac_y; accessor_write_int ac_z; // set accessor struct data members ac_x.data = x; ac_x.len = 10; ac_y.data = y; ac_y.len = 10; ac_z.data = z; ac_z.len = 10; #pragma omp target teams distribute parallel for for (int i=0; i< 10; i++){ // access the "data" member of the accessor struct ac_z.data[i] = ac_x.data[i] + ac_y.data[i]; } // free dynamic data free(x); free(y); free(z); return 0; }
ellipticBuildJacobi.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "elliptic.h" void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagTri3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A); void BuildLocalIpdgDiagQuad3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A); void BuildLocalIpdgDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A); void BuildLocalContinuousDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *A); void BuildLocalContinuousDiagQuad3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *A); void BuildLocalContinuousDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A); void ellipticBuildJacobi(elliptic_t* elliptic, dfloat lambda, dfloat **invDiagA){ mesh_t *mesh = elliptic->mesh; setupAide options = elliptic->options; // surface mass matrices MS = MM*LIFT dfloat *MS = (dfloat *) calloc(mesh->Nfaces*mesh->Nfp*mesh->Nfp,sizeof(dfloat)); for (int f=0;f<mesh->Nfaces;f++) { for (int n=0;n<mesh->Nfp;n++) { int fn = mesh->faceNodes[f*mesh->Nfp+n]; for (int 
m=0;m<mesh->Nfp;m++) { dfloat MSnm = 0; for (int i=0;i<mesh->Np;i++){ MSnm += mesh->MM[fn+i*mesh->Np]*mesh->LIFT[i*mesh->Nfp*mesh->Nfaces+f*mesh->Nfp+m]; } MS[m+n*mesh->Nfp + f*mesh->Nfp*mesh->Nfp] = MSnm; } } } // build some monolithic basis arrays (for quads and hexes) dfloat *B = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Br = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bs = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bt = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); if (elliptic->elementType==QUADRILATERALS) { int mode = 0; for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; ++node; } } ++mode; } } } if (elliptic->elementType==HEXAHEDRA) { int mode = 0; for(int nk=0;nk<mesh->N+1;++nk){ for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int k=0;k<mesh->N+1;++k){ for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nk==k && nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j && nk==k) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i && nk==k) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; if(ni==i && nj==j) Bt[mode*mesh->Np+node] = mesh->D[nk+mesh->Nq*k]; ++node; } } } ++mode; } } } } dlong diagNnum = mesh->Np*mesh->Nelements; dfloat *diagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); if(mesh->rank==0) printf("Building diagonal...");fflush(stdout); if (options.compareArgs("DISCRETIZATION","IPDG")) { switch(elliptic->elementType){ case TRIANGLES: if (options.compareArgs("BASIS","BERN")) { #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgBBDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } else { if(mesh->dim==2){ #pragma omp parallel for for(dlong 
eM=0;eM<mesh->Nelements;++eM){ BuildLocalIpdgDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } } else{ #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM){ BuildLocalIpdgDiagTri3D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } } } break; case QUADRILATERALS: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagQuad2D(elliptic, mesh, lambda, MS, B, Br, Bs, eM, diagA + eM*mesh->Np); // TW: MISSING break; case TETRAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagTet3D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagHex3D(elliptic, mesh, lambda, MS, B, Br, Bs, Bt, eM, diagA + eM*mesh->Np); break; } } else if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) { switch(elliptic->elementType){ case TRIANGLES: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTri2D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case QUADRILATERALS:{ #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM){ if(elliptic->dim==2) BuildLocalContinuousDiagQuad2D(elliptic, mesh, lambda, eM, B, Br, Bs, diagA + eM*mesh->Np); if(elliptic->dim==3) BuildLocalContinuousDiagQuad3D(elliptic, mesh, lambda, eM, B, Br, Bs, diagA + eM*mesh->Np); } }break; case TETRAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTet3D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagHex3D(elliptic, mesh, lambda, eM, B, Br, Bs, Bt, diagA + eM*mesh->Np); break; } } if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) ogsGatherScatter(diagA, ogsDfloat, ogsAdd, elliptic->ogs); *invDiagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); for (dlong n=0;n<mesh->Nelements*mesh->Np;n++) { 
(*invDiagA)[n] = 1/diagA[n]; } if(mesh->rank==0) printf("done.\n"); free(diagA); free(MS); free(B); free(Br); free(Bs); free(Bt); } void BuildLocalIpdgDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat J = mesh->vgeo[vbase+JID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*drdx*drdx*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdx*dsdx*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdx*drdx*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdx*dsdx*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdy*drdy*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdy*dsdy*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdy*drdy*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdy*dsdy*mesh->Sss[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int m=0;m<mesh->Nfp;++m){ int mM = 
mesh->faceNodes[fM*mesh->Nfp+m]; if (mM == nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; } } } } } } void BuildLocalIpdgDiagTri3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat drdz = mesh->vgeo[vbase+RZID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat dsdz = mesh->vgeo[vbase+SZID]; dfloat J = mesh->vgeo[vbase+JID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*drdx*drdx*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdx*dsdx*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdx*drdx*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdx*dsdx*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdy*drdy*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdy*dsdy*mesh->Srs[n*mesh->Np+n]; 
A[n] += J*dsdy*drdy*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdy*dsdy*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdz*drdz*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdz*dsdz*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdz*drdz*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdz*dsdz*mesh->Sss[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat nz = mesh->sgeo[sid+NZID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM == nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM]; dfloat DzMim = 
drdz*mesh->Dr[iM*mesh->Np+nM] + dsdz*mesh->Ds[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; A[nM] += -0.5*nz*(1+bcD)*(1-bcN)*MSfni*DzMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n]; dfloat DzMin = drdz*mesh->Dr[iM*mesh->Np+n] + dsdz*mesh->Ds[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DzMin*MSfim; } } } } } } void BuildLocalIpdgPatchAxTri2D(elliptic_t* elliptic, mesh_t* mesh, int basisNp, dfloat *basis, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); //generate the BB diagonal by extracting it from the transformed patch void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dfloat *patchA = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); int basisNp = mesh->Np; dfloat *basis = mesh->VB; BuildLocalIpdgPatchAxTri2D(elliptic, mesh, basisNp, basis, lambda, MS, eM, patchA); for(int n=0;n<mesh->Np;++n) { A[n] = patchA[n*mesh->Np+n]; //store the diagonal entry } free(patchA); } //returns the continuous C0 patch A matrix for element eM void BuildLocalContinuousDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked 
nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace 
terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat* Bs, dfloat *A) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int iid = nx+ny*mesh->Nq; if (elliptic->mapB[nx+ny*mesh->Nq+eM*mesh->Np]!=1) { A[iid] = 0; for (int k=0;k<mesh->Nq;k++) { int id = k+ny*mesh->Nq; dfloat Grr = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; A[iid] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int id = nx+k*mesh->Nq; dfloat Gss = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; A[iid] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } int id = nx+ny*mesh->Nq; dfloat Grs = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; A[iid] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat JW = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; A[iid] += JW*lambda; } else { A[iid] = 1; //just put a 1 so A is invertable } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked 
nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagQuad3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat drdz = mesh->vgeo[base+mesh->Np*RZID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat dsdz = mesh->vgeo[base+mesh->Np*SZID]; dfloat dtdx = mesh->vgeo[base+mesh->Np*TXID]; dfloat dtdy = mesh->vgeo[base+mesh->Np*TYID]; dfloat dtdz = mesh->vgeo[base+mesh->Np*TZID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn] + dtdx; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn] + dtdy; dfloat dlndz = drdz*Br[idn] + dsdz*Bs[idn] + dtdz; A[n] += JW*(dlndx*dlndx + dlndy*dlndy + dlndz*dlndz); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat drdzM = mesh->vgeo[baseM+mesh->Np*RZID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; dfloat dsdzM = mesh->vgeo[baseM+mesh->Np*SZID]; dfloat dtdxM = mesh->vgeo[baseM+mesh->Np*TXID]; dfloat dtdyM = mesh->vgeo[baseM+mesh->Np*TYID]; dfloat dtdzM = mesh->vgeo[baseM+mesh->Np*TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = 
mesh->sgeo[base+NYID]; dfloat nz = mesh->sgeo[base+NZID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM] + dtdxM; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM] + dtdyM; dfloat dlndzM = drdzM*Br[idnM] + dsdzM*Bs[idnM] + dtdzM; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM+nz*dlndzM ; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; A[n] += -0.5*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagQuad3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat* Bs, dfloat *A) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int iid = nx+ny*mesh->Nq; A[iid] = 0; for (int k=0;k<mesh->Nq;k++) { int id = k+ny*mesh->Nq; dfloat Grr = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; A[iid] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int id = nx+k*mesh->Nq; dfloat Gss = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; A[iid] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } int id = nx+ny*mesh->Nq; dfloat Grs = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; A[iid] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; // id = nx+ny*mesh->Nq; // dfloat Grt = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G02ID*mesh->Np]; // A[iid] += 2*Grt*mesh->D[nx+nx*mesh->Nq]; // id = nx+ny*mesh->Nq; // dfloat Gst = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G12ID*mesh->Np]; // A[iid] += 2*Gst*mesh->D[ny+ny*mesh->Nq]; // dfloat Gtt = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G22ID*mesh->Np]; // A[iid] += Gtt; dfloat JW = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; A[iid] += JW*lambda; } } } void BuildLocalIpdgDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, 
dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat drdz = mesh->vgeo[vbase+RZID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat dsdz = mesh->vgeo[vbase+SZID]; dfloat dtdx = mesh->vgeo[vbase+TXID]; dfloat dtdy = mesh->vgeo[vbase+TYID]; dfloat dtdz = mesh->vgeo[vbase+TZID]; dfloat J = mesh->vgeo[vbase+JID]; dfloat G00 = drdx*drdx + drdy*drdy + drdz*drdz; dfloat G01 = drdx*dsdx + drdy*dsdy + drdz*dsdz; dfloat G02 = drdx*dtdx + drdy*dtdy + drdz*dtdz; dfloat G10 = dsdx*drdx + dsdy*drdy + dsdz*drdz; dfloat G11 = dsdx*dsdx + dsdy*dsdy + dsdz*dsdz; dfloat G12 = dsdx*dtdx + dsdy*dtdy + dsdz*dtdz; dfloat G20 = dtdx*drdx + dtdy*drdy + dtdz*drdz; dfloat G21 = dtdx*dsdx + dtdy*dsdy + dtdz*dsdz; dfloat G22 = dtdx*dtdx + dtdy*dtdy + dtdz*dtdz; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*G00*mesh->Srr[n*mesh->Np+n]; A[n] += J*G01*mesh->Srs[n*mesh->Np+n]; A[n] += J*G02*mesh->Srt[n*mesh->Np+n]; A[n] += J*G10*mesh->Ssr[n*mesh->Np+n]; A[n] += J*G11*mesh->Sss[n*mesh->Np+n]; A[n] += J*G12*mesh->Sst[n*mesh->Np+n]; A[n] += J*G20*mesh->Str[n*mesh->Np+n]; A[n] += J*G21*mesh->Sts[n*mesh->Np+n]; A[n] += J*G22*mesh->Stt[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat nz = mesh->sgeo[sid+NZID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = 
elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ for(int m=0;m<mesh->Nfp;++m){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM] + dtdx*mesh->Dt[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM] + dtdy*mesh->Dt[iM*mesh->Np+nM]; dfloat DzMim = drdz*mesh->Dr[iM*mesh->Np+nM] + dsdz*mesh->Ds[iM*mesh->Np+nM] + dtdz*mesh->Dt[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; A[nM] += -0.5*nz*(1+bcD)*(1-bcN)*MSfni*DzMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n] + dtdx*mesh->Dt[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n] + dtdy*mesh->Dt[iM*mesh->Np+n]; dfloat DzMin = drdz*mesh->Dr[iM*mesh->Np+n] + dsdz*mesh->Ds[iM*mesh->Np+n] + dtdz*mesh->Dt[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; 
A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; A[n] += -0.5*nz*(1+bcD)*(1-bcN)*DzMin*MSfim; } } } } } } void BuildLocalContinuousDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Grt = mesh->ggeo[gbase + G02ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat Gst = mesh->ggeo[gbase + G12ID]; dfloat Gtt = mesh->ggeo[gbase + G22ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grt*mesh->Srt[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; A[n] += Gst*mesh->Sst[n+n*mesh->Np]; A[n] += Grt*mesh->Str[n+n*mesh->Np]; A[n] += Gst*mesh->Sts[n+n*mesh->Np]; A[n] += Gtt*mesh->Stt[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat drdz = mesh->vgeo[base+mesh->Np*RZID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat dsdz = mesh->vgeo[base+mesh->Np*SZID]; dfloat dtdx = 
mesh->vgeo[base+mesh->Np*TXID]; dfloat dtdy = mesh->vgeo[base+mesh->Np*TYID]; dfloat dtdz = mesh->vgeo[base+mesh->Np*TZID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn] + dtdx*Bt[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn] + dtdy*Bt[idn]; dfloat dlndz = drdz*Br[idn] + dsdz*Bs[idn] + dtdz*Bt[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy+dlndz*dlndz); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat drdzM = mesh->vgeo[baseM+mesh->Np*RZID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; dfloat dsdzM = mesh->vgeo[baseM+mesh->Np*SZID]; dfloat dtdxM = mesh->vgeo[baseM+mesh->Np*TXID]; dfloat dtdyM = mesh->vgeo[baseM+mesh->Np*TYID]; dfloat dtdzM = mesh->vgeo[baseM+mesh->Np*TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat nz = mesh->sgeo[base+NZID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM] + dtdxM*Bt[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM] + dtdyM*Bt[idnM]; dfloat dlndzM = drdzM*Br[idnM] + dsdzM*Bs[idnM] + dtdzM*Bt[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM+nz*dlndzM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double 
checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A) { for (int nz=0;nz<mesh->Nq;nz++) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int idn = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; if (elliptic->mapB[idn+eM*mesh->Np]!=1) { A[idn] = 0; int id = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dlong base = eM*mesh->Np*mesh->Nggeo; dfloat Grs = mesh->ggeo[base + id + G01ID*mesh->Np]; A[idn] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat Grt = mesh->ggeo[base + id + G02ID*mesh->Np]; A[idn] += 2*Grt*mesh->D[nx+nx*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; dfloat Gst = mesh->ggeo[base + id + G12ID*mesh->Np]; A[idn] += 2*Gst*mesh->D[ny+ny*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; for (int k=0;k<mesh->Nq;k++) { int iid = k+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grr = mesh->ggeo[base + iid + G00ID*mesh->Np]; A[idn] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+k*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gss = mesh->ggeo[base + iid + G11ID*mesh->Np]; A[idn] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+ny*mesh->Nq+k*mesh->Nq*mesh->Nq; dfloat Gtt = mesh->ggeo[base + iid + G22ID*mesh->Np]; A[idn] += Gtt*mesh->D[nz+k*mesh->Nq]*mesh->D[nz+k*mesh->Nq]; } dfloat JW = mesh->ggeo[base + id + GWJID*mesh->Np]; A[idn] += JW*lambda; } else { A[idn] = 1; //just put a 1 so A is invertable } } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int 
n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } }
GB_binop__bclr_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bclr_int64 // A.*B function (eWiseMult): GB_AemultB__bclr_int64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bclr_int64 // C+=b function (dense accum): GB_Cdense_accumb__bclr_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_int64 // C=scalar+B GB_bind1st__bclr_int64 // C=scalar+B' GB_bind1st_tran__bclr_int64 // C=A+scalar GB_bind2nd__bclr_int64 // C=A'+scalar GB_bind2nd_tran__bclr_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = GB_BITCLR (aij, bij, int64_t, 64) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITCLR (x, y, int64_t, 64) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_INT64 || GxB_NO_BCLR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bclr_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bclr_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bclr_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bclr_int64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bclr_int64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bclr_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = Bx [p] ; Cx [p] = GB_BITCLR (x, bij, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix 
with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bclr_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = Ax [p] ; Cx [p] = GB_BITCLR (aij, y, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (x, aij, int64_t, 64) ; \ } GrB_Info GB_bind1st_tran__bclr_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (aij, y, int64_t, 64) ; \ } GrB_Info GB_bind2nd_tran__bclr_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % John Cristy % % December 2003 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/compare.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/statistic.h" #include "magick/transform.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImageChannels() compares one or more image channels of an image % to a reconstructed image and returns the difference image. % % The format of the CompareImageChannels method is: % % Image *CompareImageChannels(const Image *image, % const Image *reconstruct_image,const ChannelType channel, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o channel: the channel. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image, const MetricType metric,double *distortion,ExceptionInfo *exception) { Image *highlight_image; highlight_image=CompareImageChannels(image,reconstruct_image, CompositeChannels,metric,distortion,exception); return(highlight_image); } MagickExport Image *CompareImageChannels(Image *image, const Image *reconstruct_image,const ChannelType channel, const MetricType metric,double *distortion,ExceptionInfo *exception) { CacheView *highlight_view, *image_view, *reconstruct_view; const char *artifact; Image *difference_image, *highlight_image; MagickBooleanType status; MagickPixelPacket highlight, lowlight, zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((reconstruct_image->columns != image->columns) || (reconstruct_image->rows != image->rows)) ThrowImageException(ImageError,"ImageSizeDiffers"); status=GetImageChannelDistortion(image,reconstruct_image,channel,metric, distortion,exception); if (status == MagickFalse) return((Image *) NULL); difference_image=CloneImage(image,0,0,MagickTrue,exception); if (difference_image == (Image *) NULL) return((Image *) NULL); (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel); highlight_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (highlight_image == (Image *) NULL) { difference_image=DestroyImage(difference_image); return((Image *) NULL); } if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse) { InheritException(exception,&highlight_image->exception); 
difference_image=DestroyImage(difference_image); highlight_image=DestroyImage(highlight_image); return((Image *) NULL); } (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel); (void) QueryMagickColor("#f1001ecc",&highlight,exception); artifact=GetImageArtifact(image,"highlight-color"); if (artifact != (const char *) NULL) (void) QueryMagickColor(artifact,&highlight,exception); (void) QueryMagickColor("#ffffffcc",&lowlight,exception); artifact=GetImageArtifact(image,"lowlight-color"); if (artifact != (const char *) NULL) (void) QueryMagickColor(artifact,&lowlight,exception); if (highlight_image->colorspace == CMYKColorspace) { ConvertRGBToCMYK(&highlight); ConvertRGBToCMYK(&lowlight); } /* Generate difference image. */ status=MagickTrue; GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); highlight_view=AcquireCacheView(highlight_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel, reconstruct_pixel; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register IndexPacket *restrict highlight_indexes; register PixelPacket *restrict r; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(highlight_view,0,y,highlight_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); 
highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view); pixel=zero; reconstruct_pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { MagickStatusType difference; SetMagickPixelPacket(image,p,indexes+x,&pixel); SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x, &reconstruct_pixel); difference=MagickFalse; if (channel == CompositeChannels) { if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse) difference=MagickTrue; } else { if (((channel & RedChannel) != 0) && (GetPixelRed(p) != GetPixelRed(q))) difference=MagickTrue; if (((channel & GreenChannel) != 0) && (GetPixelGreen(p) != GetPixelGreen(q))) difference=MagickTrue; if (((channel & BlueChannel) != 0) && (GetPixelBlue(p) != GetPixelBlue(q))) difference=MagickTrue; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) && (GetPixelOpacity(p) != GetPixelOpacity(q))) difference=MagickTrue; if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) && (GetPixelIndex(indexes+x) != GetPixelIndex(reconstruct_indexes+x))) difference=MagickTrue; } if (difference != MagickFalse) SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x); else SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x); p++; q++; r++; } sync=SyncCacheViewAuthenticPixels(highlight_view,exception); if (sync == MagickFalse) status=MagickFalse; } highlight_view=DestroyCacheView(highlight_view); reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); (void) CompositeImage(difference_image,image->compose,highlight_image,0,0); highlight_image=DestroyImage(highlight_image); if (status == MagickFalse) difference_image=DestroyImage(difference_image); return(difference_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D i s t o r t i o n % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelDistortion() compares one or more image channels of an image % to a reconstructed image and returns the specified distortion metric. % % The format of the CompareImageChannels method is: % % MagickBooleanType GetImageChannelDistortion(const Image *image, % const Image *reconstruct_image,const ChannelType channel, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o channel: the channel. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelDistortion(image,reconstruct_image,CompositeChannels, metric,distortion,exception); return(status); } static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel,double *distortion, ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; MagickPixelPacket zero; ssize_t y; /* Compute the absolute difference in pixels between two images. 
*/ status=MagickTrue; GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; MagickPixelPacket pixel, reconstruct_pixel; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); pixel=zero; reconstruct_pixel=pixel; (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x, &reconstruct_pixel); if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse) { if ((channel & RedChannel) != 0) channel_distortion[RedChannel]++; if ((channel & GreenChannel) != 0) channel_distortion[GreenChannel]++; if ((channel & BlueChannel) != 0) channel_distortion[BlueChannel]++; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) channel_distortion[OpacityChannel]++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) channel_distortion[BlackChannel]++; channel_distortion[CompositeChannels]++; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteError) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) 
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  GetNumberChannels() counts how many of the selected channels are active
  for this image: red/green/blue always count when selected, opacity only
  when the image has a matte channel, and the index (black) channel only
  for CMYK images.  Used to normalize composite distortion values.
*/
static size_t GetNumberChannels(const Image *image,
  const ChannelType channel)
{
  size_t
    channels;

  channels=0;
  if ((channel & RedChannel) != 0)
    channels++;
  if ((channel & GreenChannel) != 0)
    channels++;
  if ((channel & BlueChannel) != 0)
    channels++;
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    channels++;
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    channels++;
  return(channels);
}

/*
  GetFuzzDistortion() accumulates the per-channel squared differences
  (quantum-scaled) between image and reconstruct_image; the caller-visible
  result is the mean over all pixels with the composite channel averaged
  over the active channels and square-rooted (completed past this span).
  Rows are processed in parallel; per-row sums are merged under a critical
  section.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    /* Per-row accumulator; merged into *distortion after the pixel loop. */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(GetPixelRed(p)-(MagickRealType)
GetPixelRed(q)); channel_distortion[RedChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*(GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q)); channel_distortion[GreenChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*(GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q)); channel_distortion[BlueChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) || (reconstruct_image->matte != MagickFalse))) { distance=QuantumScale*((image->matte != MagickFalse ? GetPixelOpacity(p) : OpaqueOpacity)- (reconstruct_image->matte != MagickFalse ? GetPixelOpacity(q): OpaqueOpacity)); channel_distortion[OpacityChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*(GetPixelIndex(indexes+x)- (MagickRealType) GetPixelIndex(reconstruct_indexes+x)); channel_distortion[BlackChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]/=((double) image->columns*image->rows); if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) || (reconstruct_image->matte != MagickFalse))) distortion[CompositeChannels]/=(double) (GetNumberChannels(image,channel)-1); else distortion[CompositeChannels]/=(double) 
GetNumberChannels(image,channel); distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t i; ssize_t y; status=MagickTrue; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & RedChannel) != 0) { distance=QuantumScale*fabs(GetPixelRed(p)-(double) GetPixelRed(q)); channel_distortion[RedChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*fabs(GetPixelGreen(p)-(double) GetPixelGreen(q)); channel_distortion[GreenChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*fabs(GetPixelBlue(p)-(double) GetPixelBlue(q)); channel_distortion[BlueChannel]+=distance; 
channel_distortion[CompositeChannels]+=distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q)); channel_distortion[OpacityChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { distance=QuantumScale*fabs(GetPixelIndex(indexes+x)-(double) GetPixelIndex(reconstruct_indexes+x)); channel_distortion[BlackChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]/=((double) image->columns*image->rows); distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,const ChannelType channel,double *distortion, ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; MagickRealType alpha, area, beta, maximum_error, mean_error; ssize_t y; status=MagickTrue; alpha=1.0; beta=1.0; area=0.0; maximum_error=0.0; mean_error=0.0; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; 
break; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & OpacityChannel) != 0) { if (image->matte != MagickFalse) alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p))); if (reconstruct_image->matte != MagickFalse) beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q)); } if ((channel & RedChannel) != 0) { distance=fabs(alpha*GetPixelRed(p)-beta* GetPixelRed(q)); distortion[RedChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if ((channel & GreenChannel) != 0) { distance=fabs(alpha*GetPixelGreen(p)-beta* GetPixelGreen(q)); distortion[GreenChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if ((channel & BlueChannel) != 0) { distance=fabs(alpha*GetPixelBlue(p)-beta* GetPixelBlue(q)); distortion[BlueChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=fabs((double) GetPixelOpacity(p)- GetPixelOpacity(q)); distortion[OpacityChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=fabs(alpha*GetPixelIndex(indexes+x)-beta* GetPixelIndex(reconstruct_indexes+x)); distortion[BlackChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p++; q++; } } 
reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[CompositeChannels]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t i; ssize_t y; status=MagickTrue; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & RedChannel) != 0) { distance=QuantumScale*(GetPixelRed(p)-(MagickRealType) GetPixelRed(q)); channel_distortion[RedChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*(GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q)); 
channel_distortion[GreenChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*(GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q)); channel_distortion[BlueChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*(GetPixelOpacity(p)-(MagickRealType) GetPixelOpacity(q)); channel_distortion[OpacityChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*(GetPixelIndex(indexes+x)- (MagickRealType) GetPixelIndex(reconstruct_indexes+x)); channel_distortion[BlackChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]/=((double) image->columns*image->rows); distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; MagickBooleanType status; MagickOffsetType progress; MagickRealType area; register ssize_t i; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. 
*/ image_statistics=GetImageChannelStatistics(image,exception); reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception); status=MagickTrue; progress=0; for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]=0.0; area=1.0/((MagickRealType) image->columns*image->rows-1); image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) distortion[RedChannel]+=area*QuantumScale*(GetPixelRed(p)- image_statistics[RedChannel].mean)*(GetPixelRed(q)- reconstruct_statistics[RedChannel].mean); if ((channel & GreenChannel) != 0) distortion[GreenChannel]+=area*QuantumScale*(GetPixelGreen(p)- image_statistics[GreenChannel].mean)*(GetPixelGreen(q)- reconstruct_statistics[GreenChannel].mean); if ((channel & BlueChannel) != 0) distortion[BlueChannel]+=area*QuantumScale*(GetPixelBlue(p)- image_statistics[BlueChannel].mean)*(GetPixelBlue(q)- reconstruct_statistics[BlueChannel].mean); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[OpacityChannel]+=area*QuantumScale*( GetPixelOpacity(p)-image_statistics[OpacityChannel].mean)* (GetPixelOpacity(q)- reconstruct_statistics[OpacityChannel].mean); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == 
CMYKColorspace)) distortion[BlackChannel]+=area*QuantumScale*( GetPixelIndex(indexes+x)- image_statistics[OpacityChannel].mean)*( GetPixelIndex(reconstruct_indexes+x)- reconstruct_statistics[OpacityChannel].mean); p++; q++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SimilarityImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. */ for (i=0; i < (ssize_t) CompositeChannels; i++) { MagickRealType gamma; gamma=image_statistics[i].standard_deviation* reconstruct_statistics[i].standard_deviation; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); distortion[i]=QuantumRange*gamma*distortion[i]; } distortion[CompositeChannels]=0.0; if ((channel & RedChannel) != 0) distortion[CompositeChannels]+=distortion[RedChannel]* distortion[RedChannel]; if ((channel & GreenChannel) != 0) distortion[CompositeChannels]+=distortion[GreenChannel]* distortion[GreenChannel]; if ((channel & BlueChannel) != 0) distortion[CompositeChannels]+=distortion[BlueChannel]* distortion[BlueChannel]; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[CompositeChannels]+=distortion[OpacityChannel]* distortion[OpacityChannel]; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) distortion[CompositeChannels]+=distortion[BlackChannel]* distortion[BlackChannel]; distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]/ GetNumberChannels(image,channel)); /* Free resources. 
*/ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & RedChannel) != 0) { distance=QuantumScale*fabs(GetPixelRed(p)-(double) GetPixelRed(q)); if (distance > channel_distortion[RedChannel]) channel_distortion[RedChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*fabs(GetPixelGreen(p)-(double) GetPixelGreen(q)); if (distance > channel_distortion[GreenChannel]) channel_distortion[GreenChannel]=distance; if (distance > 
channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*fabs(GetPixelBlue(p)-(double) GetPixelBlue(q)); if (distance > channel_distortion[BlueChannel]) channel_distortion[BlueChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q)); if (distance > channel_distortion[OpacityChannel]) channel_distortion[OpacityChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*fabs(GetPixelIndex(indexes+x)-(double) GetPixelIndex(reconstruct_indexes+x)); if (distance > channel_distortion[BlackChannel]) channel_distortion[BlackChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) if (channel_distortion[i] > distortion[i]) distortion[i]=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { MagickBooleanType status; status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion, exception); if ((channel & RedChannel) != 0) distortion[RedChannel]=20.0*log10((double) 1.0/sqrt( distortion[RedChannel])); if ((channel & GreenChannel) != 0) 
    distortion[GreenChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[GreenChannel]));
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[BlueChannel]));
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    distortion[OpacityChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[OpacityChannel]));
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[BlackChannel]));
  /*
    NOTE(review): 1.0/sqrt(mse) diverges when mse == 0 (identical images),
    producing +inf/NaN PSNR here — presumably acceptable upstream; confirm.
  */
  distortion[CompositeChannels]=20.0*log10((double) 1.0/sqrt(
    distortion[CompositeChannels]));
  return(status);
}

/*
  GetRootMeanSquaredDistortion() computes the mean-squared distortion and
  then takes the square root of each requested per-channel value and of
  the composite value.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* RMSE is simply the square root of the MSE, channel by channel. */
  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=sqrt(distortion[RedChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=sqrt(distortion[GreenChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=sqrt(distortion[BlueChannel]);
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=sqrt(distortion[BlackChannel]);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}

/*
  GetImageChannelDistortion() dispatches on `metric' to the appropriate
  per-metric worker, allocating a scratch per-channel distortion array and
  returning the composite value in *distortion.
*/
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  /* Validate arguments before any allocation. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  /* NOTE(review): duplicate debug trace — the same event was already logged
     a few lines above; harmless but redundant. */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  /* Dispatch on metric; the NCC case doubles as the default, so status is
     always assigned by the switch. */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  /* Only the composite value is returned to the caller. */
  *distortion=channel_distortion[CompositeChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   C h a n n e l   D i s t o r t i o n s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelDistortions() compares the image channels of an image to a
%  reconstructed image and returns the specified distortion metric for each
%  channel.
%
%  The format of the GetImageChannelDistortions method is:
%
%      double *GetImageChannelDistortions(const Image *image,
%        const Image *reconstruct_image,const MetricType metric,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o metric: the metric.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  /* NOTE(review): duplicate debug trace, as in GetImageChannelDistortion. */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageSizeDiffers","`%s'",image->filename);
      return((double *) NULL);
    }
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  /* Same dispatch as GetImageChannelDistortion, but always over all
     channels (CompositeChannels) so the caller gets the full vector. */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
  }
  /* On failure the buffer is released and ownership is not transferred;
     on success the caller owns (and must relinquish) the returned array. */
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I s   I m a g e s   E q u a l                                           %
%                                                                             %
%                                                                             %
% % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImagesEqual() measures the difference between colors at each pixel % location of two images. A value other than 0 means the colors match % exactly. Otherwise an error measure is computed by summing over all % pixels in an image the distance squared in RGB space between each image % pixel and its corresponding pixel in the reconstruct image. The error % measure is assigned to these image members: % % o mean_error_per_pixel: The mean error for any single pixel in % the image. % % o normalized_mean_error: The normalized mean quantization error for % any single pixel in the image. This distance measure is normalized to % a range between 0 and 1. It is independent of the range of red, green, % and blue values in the image. % % o normalized_maximum_error: The normalized maximum quantization % error for any single pixel in the image. This distance measure is % normalized to a range between 0 and 1. It is independent of the range % of red, green, and blue values in your image. % % A small normalized mean square error, accessed as % image->normalized_mean_error, suggests the images are very similar in % spatial layout and color. % % The format of the IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(Image *image, % const Image *reconstruct_image) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. 
%
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
  const Image *reconstruct_image)
{
  CacheView
    *image_view,
    *reconstruct_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      /* Per-channel absolute differences feed all three statistics:
         mean error, mean squared error, and maximum error.  `area' counts
         channel samples, not pixels, so the means are per-sample. */
      distance=fabs(GetPixelRed(p)-(double) GetPixelRed(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelGreen(p)-(double) GetPixelGreen(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelBlue(p)-(double) GetPixelBlue(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      if (image->matte != MagickFalse)
        {
          distance=fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /* Black channel only when *both* images are CMYK, so the index queues
         are valid on both sides. */
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* NOTE(review): if the very first row fetch fails, area stays 0 and these
     divisions produce NaN rather than an error — TODO confirm whether a
     guard is wanted here. */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  /* Equal iff not a single sample differed. */
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S i m i l a r i t y   I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SimilarityImage() compares the reference image of the image and returns the
%  best match offset.  In addition, it returns a similarity image such that an
%  exact match location is completely white and if none of the pixels match,
%  black, otherwise some gray level in-between.
%
%  The format of the SimilarityImageImage method is:
%
%      Image *SimilarityImage(const Image *image,const Image *reference,
%        RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o offset: the best match offset of the reference image within the image.
%
%    o similarity: the computed similarity between the images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetSimilarityMetric(): distortion of `reference' against the crop of
  `image' at (x_offset,y_offset).  Lower is a better match.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  /* NOTE(review): a failed crop returns 0.0, which the caller interprets as
     a *perfect* match — TODO confirm this is the intended failure mode. */
  if (similarity_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  /* The distortion status is deliberately ignored; the (possibly zero)
     distortion value is returned regardless. */
  status=GetImageDistortion(similarity_image,reference,metric,&distortion,
    exception);
  (void) status;
  similarity_image=DestroyImage(similarity_image);
  return(distortion);
}

/*
  SimilarityImage(): convenience wrapper that searches with the RMSE metric.
*/
MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
  Image
    *similarity_image;

  similarity_image=SimilarityMetricImage(image,reference,
    RootMeanSquaredErrorMetric,offset,similarity_metric,exception);
  return(similarity_image);
}

/*
  SimilarityMetricImage(): exhaustively slide `reference' over `image',
  recording the best (lowest) distortion offset and building a grayscale
  similarity map (white = exact match).
*/
MagickExport Image *SimilarityMetricImage(Image *image,const Image *reference,
  const MetricType metric,RectangleInfo *offset,double *similarity_metric,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  /* Sentinel: any real distortion below 1.0 will replace this. */
  *similarity_metric=1.0;
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    ThrowImageException(ImageError,"ImageSizeDiffers");
  /* One similarity pixel per candidate offset. */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&similarity_image->exception);
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireCacheView(similarity_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,
      similarity_image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* The best-offset update must be serialized across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          *similarity_metric=similarity;
          offset->x=x;
          offset->y=y;
        }
      /* Map: low distortion -> bright pixel; gray written to all channels. */
      SetPixelRed(q,ClampToQuantum(QuantumRange-QuantumRange*
        similarity));
      SetPixelGreen(q,GetPixelRed(q));
      SetPixelBlue(q,GetPixelRed(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  return(similarity_image);
}
GB_unop__identity_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint64_fp32
// op(A') function:  GB_unop_tran__identity_uint64_fp32

// C type:   uint64_t
// A type:   float
// cast:     uint64_t cij = GB_cast_to_uint64_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: float -> uint64_t via an intermediate double
#define GB_CAST(z, aij) \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    float aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; \
    Cx [pC] = z ;          \
}

// true if operator is the identity op with no typecasting
// (0 here: the identity op still requires a float -> uint64_t typecast,
// so the memcpy fast path below is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint64_fp32
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
bitpath_monte_carlo copy.h
#ifndef BITEDGE_MONTE_CARLO_H_ #define BITEDGE_MONTE_CARLO_H_ #include <set> #include <stack> #include "../ugraph_io/ugraph_structures.h" #include "../utils/memory_monitor.h" #include "../utils/convergence_helper.h" #include "../ugraph_io/file_io.h" #include "../utils/globals.h" namespace CPU_ALGOS{ struct EdgeBits_T{ std::vector<bool*> bits; EdgeBits_T():bits(0){} }; int bitedge_traverse_run( uint source, uint target, std::vector<initial_vertex> graph, std::vector<EdgeBits_T> edgeBits, uint nEdges, int k); //void find_k_monte_carlo(Graph & graph) void find_k_bitedge_monte_carlo(std::vector<initial_vertex> & graph, uint nEdges, std::ifstream & stList) { std::cout << std::endl; std::cout << "Init Monte Carlo Sampling (Finding K)..." << std::endl; int num_reached = 0; int k = 0; double reliability; std::vector<double> reliability_k, reliability_j; double curr_avg_r = 2.0; double prev_avg_r = 3.0; double avg_r = 0.0; double diff_sq_sum = 0.0; bool write_flag = true; uint source, target; std::pair<uint, uint> source_target_pair; std::cout << std::endl << "Reading Source-Target file..." 
<< std::endl; std::vector<std::pair<uint, uint>> source_target_pairs(0); uint nQuery = read_stlist(stList, source_target_pairs); memory_monitor mm = memory_monitor(); std::thread t1(&memory_monitor::update_peak_memory, std::ref(mm)); t1.detach(); while ( fabs(curr_avg_r - prev_avg_r) > ALGO_CONF::kReliabilityThreshold && k < ALGO_CONF::kMaximumRound) { /*k < k_limit*/ // Step up k k += ALGO_CONF::kKStepUp; std::cout << std::endl << "k = " << k << std::endl; int sumBits = k * nEdges; // Reset var reliability_k.clear(); //Generate K sampling possible world auto start_g = std::chrono::high_resolution_clock::time_point::max(); auto finish_g = std::chrono::high_resolution_clock::time_point::max(); start_g = std::chrono::high_resolution_clock::now(); int numVertices = graph.size(); std::vector<EdgeBits_T> edgeBits(numVertices); int edgeInx = 0; for(int v = 0; v < numVertices; v++){ uint vdegree = graph.at(v).nbrs.size(); if( vdegree != 0 ){ for( uint inbr = 0; inbr < vdegree; inbr++){ edge ee = graph.at(v).nbrs.at(inbr).edgeValue; edgeInx ++; bool temp_bits[k]; for(int i=0; i < k; i++){ if ( check_exist( ee.probability.at(0)) ){ temp_bits[i] = true; }else{ temp_bits[i] = false; } } edgeBits.at(v).bits.push_back(temp_bits); } } } finish_g = std::chrono::high_resolution_clock::now(); auto duration_g = std::chrono::duration_cast<std::chrono::milliseconds>(finish_g - start_g).count(); std::cout << "Sampling Possible World time = " << duration_g << " ms" << std::endl; for (size_t i = 0; i < source_target_pairs.size(); i++) { source_target_pair = source_target_pairs[i]; source = source_target_pair.first; target = source_target_pair.second; // Reset var reliability_j.clear(); diff_sq_sum = 0.0; write_flag = true; for (int j = 0; j < ALGO_CONF::kRepeatForVariance; j++) { std::cout << j << "th iteration" << std::endl; // Reset initial conditions num_reached = 0; // Start time auto start = std::chrono::high_resolution_clock::time_point::max(); auto finish = 
std::chrono::high_resolution_clock::time_point::max(); start = std::chrono::high_resolution_clock::now(); mm.start_monitoring(); //Traverse K possible world auto start_t = std::chrono::high_resolution_clock::time_point::max(); auto finish_t = std::chrono::high_resolution_clock::time_point::max(); start_t = std::chrono::high_resolution_clock::now(); #pragma omp parallel for num_threads(1) //for (int i = 0; i < k; i++) { // kKStep for controling K sampling world num_reached = num_reached + bitedge_traverse_run(source, target, graph, edgeBits, nEdges, k); //} finish_t = std::chrono::high_resolution_clock::now(); auto duration_t = std::chrono::duration<double, std::milli> (finish_t - start_t).count(); std::cout << "Traversal time = " << duration_t << " ms" << std::endl; std::cout<< "num_reached: "<< num_reached << std::endl; // Calculate reliability reliability = num_reached / (double)k; // Stop time finish = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration<double, std::milli> (finish - start).count(); std::cout << "Current utilize : K=" << k << " possible world." 
<< std::endl; std::cout << "Reliability Estimator, R^ (" << source << ", " << target << ") = " << reliability << std::endl; std::cout << "Execution time = " << duration << " ms" << std::endl << std::endl; if (write_flag) { append_results_to_file(k, reliability, duration, mm.get_peak_memory(), "MonteCarlo_k_" + std::to_string(i) + "_"+ std::to_string(numVertices)+ ".csv"); write_flag = false; } // Add r to vector reliability_j.push_back(reliability); } // Add r to vector of r reliability_k.push_back(reliability); // Variance calculation avg_r = convergence_helper::get_avg_reliability(reliability_j); for (int j = 0; j < ALGO_CONF::kRepeatForVariance; j++) { auto difference_sq = pow(reliability_j[j] - avg_r, 2); diff_sq_sum += difference_sq; } append_results_to_file(k, diff_sq_sum / (ALGO_CONF::kRepeatForVariance - 1), 0, i, "MC_variance_" + std::to_string(numVertices)+".csv"); } // Calulate avg r prev_avg_r = curr_avg_r; curr_avg_r = convergence_helper::get_avg_reliability(reliability_k); } mm.stop_monitoring(); } int bitedge_traverse_run( uint source, uint target, std::vector<initial_vertex> graph, std::vector<EdgeBits_T> edgeBits, uint nEdges, int k) { std::stack<uint> worklist; std::stack<uint> exploring; uint v, w; int reached = 0; int nVertices = graph.size(); int path_num = 0; worklist.push(source); if ( graph.at(v).nbrs.size() != 0) { for( uint i = 0; i < graph.at(v).nbrs.size(); i++){ exploring.push(graph.at(v).nbrs.at(i).tgtIndex); } }else { return 0; } while( !worklist.empty()){ v = exploring.top(); exploring.pop(); uint vdegree = graph.at(v).nbrs.size(); if ( vdegree != 0) { for( uint i = 0; i < vdegree; i++){ } } } // Add source in worklist worklist.push(source); explored.insert(source); if (source == target) { return 1; } while (!worklist.empty()) { v = worklist.front(); worklist.pop(); // T -> S: Iterate through all ingoing edges from s -> t uint vdegree = graph.at(v).nbrs.size(); if ( vdegree != 0) { for( uint i = 0; i < vdegree; i++){ w = 
graph.at(v).nbrs.at(i).tgtIndex; bool * ebits = edgeBits.at(v).bits.at(i); if(w == target){ for(int i =0; i< k; i++){ if( ebits[i] == true ){ reached ++; } } } if(explored.count(w) == 0){ } // Sample to check if edge (v,w) exists if ( check_exist( ee.probability.at(0)) ){ if ( w == target ) { reached ++; } if (explored.count(w) == 0) { worklist.push(w); explored.insert(w); } } else { // Edge does not exist } } } } // Target not found return 0; } } #endif
GB_binop__hypot_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__hypot_fp64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__hypot_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__hypot_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__hypot_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__hypot_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__hypot_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__hypot_fp64) // C=scalar+B GB (_bind1st__hypot_fp64) // C=scalar+B' GB (_bind1st_tran__hypot_fp64) // C=A+scalar GB (_bind2nd__hypot_fp64) // C=A'+scalar GB (_bind2nd_tran__hypot_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = hypot (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = hypot (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_HYPOT || GxB_NO_FP64 || GxB_NO_HYPOT_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__hypot_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__hypot_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__hypot_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
// NOTE(review): generated SuiteSparse:GraphBLAS kernels specialized for the
// binary operator z = hypot (x,y) on fp64 (double).  Only comments were
// added/reflowed in this pass; all code tokens are unchanged.  Each kernel
// body is pulled in from a shared template file and specialized through GB_*
// macros defined earlier in this generated file (not visible in this chunk).

// tail of the column-scale kernel, C = A*D (its header is above this chunk)
    *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// NOTE(review): disabled at generation time (#if 0).  The "(node)" name
// placeholder looks like an extraction artifact — TODO confirm against the
// generator; it normally emits "(none)" for disabled kernels.
#if 0
GrB_Info GB ((node))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__hypot_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek-slicing workspace for M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__hypot_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__hypot_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__hypot_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__hypot_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__hypot_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; GBB treats NULL as all-present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = hypot (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__hypot_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; GBB treats NULL as all-present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = hypot (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    double aij = Ax [pA] ;         \
    Cx [pC] = hypot (x, aij) ;     \
}

GrB_Info GB (_bind1st_tran__hypot_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    double aij = Ax [pA] ;         \
    Cx [pC] = hypot (aij, y) ;     \
}

GrB_Info GB (_bind2nd_tran__hypot_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the type-enabled guard opened earlier in this generated file
#endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-2,3)),ceild(16*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(8*t1+Ny+13,24)),floord(16*t2+Ny+12,24)),floord(16*t1-16*t2+Nz+Ny+11,24));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(8*t1+Nx+13,64)),floord(16*t2+Nx+12,64)),floord(24*t3+Nx+20,64)),floord(16*t1-16*t2+Nz+Nx+11,64));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),24*t3+22),64*t4+62),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } 
} } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
threadPrivate.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> int a, b, i, tid; float x; int z[10]; #pragma omp threadprivate(a, x, z) int main (int argc, char * argv[]){ /* Explicitly turn off dynamic threads */ //omp_set_dynamic(0); //z[0] = 0; printf("1st Parallel Region:\n"); #pragma omp parallel private(b,tid) { tid = omp_get_thread_num(); a = tid + 7; b = tid + 5; x = 1.1 * tid +1.0; z[0] = 9; printf("1Thread %d: a,b,x,z= %d %d %f %d\n",tid,a,b,x,z[0]); } /* end of parallel section */ printf("************************************\n"); printf("Master thread doing serial work here\n"); printf("************************************\n"); printf("2nd Parallel Region:\n"); #pragma omp parallel private(tid) { tid = omp_get_thread_num(); printf("2Thread %d: a,b,x,z= %d %d %f %d\n",tid,a,b,x,z[0]); } /* end of parallel section */ }
convert.h
/* Copyright 2015 The math21 Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #pragma once #include "inner.h" namespace math21 { template<typename VecType1, typename VecType2> void math21_convert_container_to_container(const VecType1 &A, VecType2 &B) { MATH21_ASSERT(A.size() == B.size()) NumN n = A.size(); for (NumN i = 1; i <= n; ++i) { B(i) = A(i); } } template<typename T, template<typename> class Container, typename VecType_std> void math21_convert_container_2_std_vec(const Container<T> &A, VecType_std &B) { MATH21_ASSERT(A.size() == B.size()) NumN n = A.size(); //#pragma omp parallel for for (NumN i = 1; i <= n; ++i) { B[i - 1] = A(i); } } template<typename T, template<typename> class Container, typename VecType_std> void math21_convert_std_vec_2_container(const VecType_std &A, Container<T> &B) { MATH21_ASSERT(A.size() == B.size()) NumN n = A.size(); //#pragma omp parallel for for (NumN i = 1; i <= n; ++i) { B(i) = A[i - 1]; } } template<typename T> void math21_c_array_set(NumN n, const T *A, T *B) { NumN i; #pragma omp parallel for for (i = 0; i < n; ++i) { B[i] = A[i]; } } // prone to misuse template<typename T, typename T2> void math21_c_array_convert(NumN n, const T *A, T2 *B) { NumN i; #pragma omp parallel for for (i = 0; i < n; ++i) { B[i] = (T2)A[i]; } } template<typename StdVecType1, typename StdVecType2> void math21_convert_stdvec_2_stdvec(const StdVecType1 &A, StdVecType2 &B) { 
MATH21_ASSERT(A.size() == B.size()) NumN n = A.size(); //#pragma omp parallel for for (NumN i = 0; i < n; ++i) { B[i] = A[i]; } } template<typename StdVecType> void math21_tool_copy_stdvec_2_stdvec(const StdVecType &A, StdVecType &B) { NumN n = A.size(); if (n == 0) { return; } if (B.size() != n) { B.resize(n); } //#pragma omp parallel for for (NumN i = 0; i < n; ++i) { B[i] = A[i]; } } }
edgebased_levelset_substep.h
// Kratos Multi-Physics
//
// Copyright (c) 2015, Pooyan Dadvand, Riccardo Rossi, CIMNE (International Center for Numerical Methods in Engineering)
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the distribution.
// - All advertising materials mentioning features or use of this software must display the following acknowledgement:
// This product includes Kratos Multi-Physics technology.
// - Neither the name of the CIMNE nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// // Project Name: Kratos // Last Modified by: $Author: antonia $ // Date: $Date: 2009-01-14 16:24:38 $ // Revision: $Revision: 1.11 $ // // #if !defined(KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED) #define KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED // #define DEBUG_OUTPUT //#define SPLIT_OSS // #define SYMM_PRESS // System includes #include <string> #include <iostream> #include <algorithm> // #include <omp.h> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/node.h" //#include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "free_surface_application.h" #include "custom_utilities/edge_data_c2c.h" namespace Kratos { template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver> class EdgeBasedLevelSetSubstep { public: //name for the self defined structure typedef EdgesStructureTypeC2C<TDim> CSR_Tuple; typedef vector<CSR_Tuple> EdgesVectorType; //name for row start and column index vectors typedef vector<unsigned int> IndicesVectorType; //defining matrix type for test calculations typedef vector< array_1d<double, TDim> > CalcVectorType; //defining type for local storage of nodal values typedef vector<double> ValuesVectorType; //defining types for matrix operations typedef typename TSparseSpace::MatrixType TSystemMatrixType; typedef typename TSparseSpace::VectorType TSystemVectorType; typedef std::size_t SizeType; //constructor and destructor EdgeBasedLevelSetSubstep (MatrixContainer& mr_matrix_container, ModelPart& mr_model_part, const double viscosity, const double density, const Vector body_force, bool use_mass_correction, double edge_detection_angle, double stabdt_pressure_factor, double stabdt_convection_factor, double tau2_factor, bool assume_constant_dp ) : mr_matrix_container (mr_matrix_container), mr_model_part (mr_model_part), mstabdt_pressure_factor 
(stabdt_pressure_factor), mstabdt_convection_factor (stabdt_convection_factor), medge_detection_angle (edge_detection_angle), mtau2_factor (tau2_factor), massume_constant_dp (assume_constant_dp) { for (ModelPart::NodesContainerType::iterator it=mr_model_part.NodesBegin(); it!=mr_model_part.NodesEnd(); it++) it->FastGetSolutionStepValue (VISCOSITY) = viscosity; mMolecularViscosity = viscosity; // mViscosity = viscosity; noalias (mBodyForce) = body_force; mRho = density; mdelta_t_avg = 1000.0; max_dt = 1.0; muse_mass_correction = use_mass_correction; mshock_coeff = 0.7; mWallLawIsActive = false; mnumsubsteps=5; mmax_dt = 0.0; mcorner_coefficient = 30.0; //50.0; medge_coefficient = 2.0; //30.0; //10.0; // for (unsigned int i = 0; i < TDim; i++) mBodyForce[i] = 0; // mBodyForce[1] = -9.81; // // mRho = 1000.0; std::cout << "Edge based level set substep solver is created" << std::endl; }; ~EdgeBasedLevelSetSubstep() { }; void SetBodyForce( const Vector& body_force) { noalias(mBodyForce) = body_force; KRATOS_WATCH(mBodyForce); } //*********************************** //function to initialize fluid solver void Initialize ( ) { KRATOS_TRY //get number of nodes unsigned int n_nodes = mr_model_part.Nodes().size(); unsigned int n_edges = mr_matrix_container.GetNumberEdges(); //size data vectors mViscosity.resize (n_nodes); mr_matrix_container.SetToZero (mViscosity); mWork.resize (n_nodes); mr_matrix_container.SetToZero (mWork); mvel_n.resize (n_nodes); mr_matrix_container.SetToZero (mvel_n); mvel_n1.resize (n_nodes); mr_matrix_container.SetToZero (mvel_n1); mPn.resize (n_nodes); mr_matrix_container.SetToZero (mPn); mPn1.resize (n_nodes); mr_matrix_container.SetToZero (mPn1); mHmin.resize (n_nodes); mr_matrix_container.SetToZero (mHmin); mHavg.resize (n_nodes); mr_matrix_container.SetToZero (mHavg); mNodalFlag.resize (n_nodes); mr_matrix_container.SetToZero (mNodalFlag); mdistances.resize (n_nodes); mr_matrix_container.SetToZero (mdistances); mTauPressure.resize (n_nodes); 
mr_matrix_container.SetToZero (mTauPressure); mTauConvection.resize (n_nodes); mr_matrix_container.SetToZero (mTauConvection); mTau2.resize (n_nodes); mr_matrix_container.SetToZero (mTau2); mPi.resize (n_nodes); mr_matrix_container.SetToZero (mPi); mXi.resize (n_nodes); mr_matrix_container.SetToZero (mXi); mx.resize (n_nodes); mr_matrix_container.SetToZero (mx); mEdgeDimensions.resize (n_edges); mr_matrix_container.SetToZero (mEdgeDimensions); //convection variables mBeta.resize (n_nodes); mr_matrix_container.SetToZero (mBeta); mPiConvection.resize (n_nodes); mr_matrix_container.SetToZero (mPiConvection); mphi_n.resize (n_nodes); mr_matrix_container.SetToZero (mphi_n); mphi_n1.resize (n_nodes); mr_matrix_container.SetToZero (mphi_n1); mEps.resize (n_nodes); mr_matrix_container.SetToZero (mEps); // mD.resize(n_nodes); // mr_matrix_container.SetToZero(mD); mA.resize (n_nodes); mr_matrix_container.SetToZero (mA); mB.resize (n_nodes); mr_matrix_container.SetToZero (mB); mdiv_error.resize (n_nodes); mr_matrix_container.SetToZero (mdiv_error); mWallReductionFactor.resize (n_nodes); mr_matrix_container.SetToZero (mWallReductionFactor); mdiag_stiffness.resize (n_nodes); mr_matrix_container.SetToZero (mdiag_stiffness); mis_slip.resize (n_nodes); mis_visited.resize (n_nodes); macc.resize (n_nodes); mr_matrix_container.SetToZero (macc); // ValuesVectorType external_pressure; // external_pressure.resize(n_nodes); //read velocity and pressure data from Kratos mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() ); mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() ); mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() ); mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() ); mr_matrix_container.FillCoordinatesFromDatabase (mx, mr_model_part.Nodes() ); //set 
flag for first time step mFirstStep = true; //loop to categorize boundary nodes std::vector< unsigned int> tempFixedVelocities; std::vector< array_1d<double,TDim> > tempFixedVelocitiesValues; std::vector< unsigned int> tempPressureOutletList; std::vector< unsigned int> tempDistanceList; for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { int index = inode->FastGetSolutionStepValue (AUX_INDEX); if (inode->IsFixed (VELOCITY_X) ) //note that the variables can be either all fixed or no one fixed { if (inode->IsFixed (VELOCITY_Y) == false || inode->IsFixed (VELOCITY_Z) == false) { std::cout << "error found on the fixity of node " << inode->Id() << std::endl; KRATOS_THROW_ERROR (std::logic_error, "velocities can be either all fixed or none fixed", "") } tempFixedVelocities.push_back (index); tempFixedVelocitiesValues.push_back (mvel_n1[index]); } if (inode->IsFixed (DISTANCE) ) tempDistanceList.push_back (index); if (inode->IsFixed (PRESSURE) ) { tempPressureOutletList.push_back (index); // mPressureOutlet.push_back(external_pressure[index]); } } mFixedVelocities.resize (tempFixedVelocities.size(),false); mFixedVelocitiesValues.resize (tempFixedVelocitiesValues.size(),false); mPressureOutletList.resize (tempPressureOutletList.size(),false); mDistanceBoundaryList.resize (tempDistanceList.size(),false); mDistanceValuesList.resize (tempDistanceList.size(),false); #pragma omp parallel for for (int i=0; i<static_cast<int> (tempFixedVelocities.size() ); i++) { mFixedVelocities[i] = tempFixedVelocities[i]; mFixedVelocitiesValues[i] = tempFixedVelocitiesValues[i]; } #pragma omp parallel for for (int i=0; i<static_cast<int> (tempPressureOutletList.size() ); i++) { mPressureOutletList[i] = tempPressureOutletList[i]; } for (int i=0; i<static_cast<int> (tempDistanceList.size() ); i++) { mDistanceBoundaryList[i] = tempDistanceList[i]; } //compute slip normals and fill SlipList CalculateNormals 
(mr_model_part.Conditions() ); mr_matrix_container.WriteVectorToDatabase (NORMAL, mSlipNormal, mr_model_part.Nodes() ); if (TDim == 3) DetectEdges3D (mr_model_part.Conditions() ); //print number of nodes corresponding to the different types of boundary conditions // KRATOS_WATCH(mFixedVelocities.size()) // KRATOS_WATCH(mPressureOutletList.size()) // KRATOS_WATCH(mSlipBoundaryList.size()) //determine number of edges and entries unsigned int n_nonzero_entries = 2 * n_edges + n_nodes; //allocate memory for variables mL.resize (n_nodes, n_nodes, n_nonzero_entries); int number_of_threads= OpenMPUtils::GetNumThreads(); std::vector<int> row_partition (number_of_threads); OpenMPUtils::DivideInPartitions (n_nodes,number_of_threads,row_partition); for (int k = 0; k < number_of_threads; k++) { #pragma omp parallel if (OpenMPUtils::ThisThread() == k) { for (int i_node = static_cast<int> (row_partition[k]); i_node < static_cast<int> (row_partition[k + 1]); i_node++) { //loop over all nodes // for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { //flag for considering diagonal matrix elements bool flag = 0; //loop over all neighbours for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; //define matrix structure row by row (the order does matter!) 
if ( (static_cast<int> (j_neighbour) > i_node) && (flag == 0) ) { //add diagonal/nodal contribution mL.push_back (i_node, i_node, 0.0); flag = 1; } //add non-diagonal/edge contribution mL.push_back (i_node, j_neighbour, 0.0); } //if diagonal element is the last non-zero element of the row if (flag == 0) mL.push_back (i_node, i_node, 0.0); } } } //compute minimum length of the surrounding edges CalculateEdgeLengths (mr_model_part.Nodes() ); //set the pressure projection to the body force value array_1d<double,3> temp = mRho * mBodyForce; for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) inode->FastGetSolutionStepValue (PRESS_PROJ) = temp; mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); //verify that neither h_min nor havg are 0 for (unsigned int i_node=0; i_node<mHmin.size(); i_node++) { if (mHmin[i_node] < 1e-20) KRATOS_THROW_ERROR ( std::logic_error,"hmin too small on node ",i_node+1) if (mHavg[i_node] < 1e-20) KRATOS_THROW_ERROR ( std::logic_error,"havg too small on node ",i_node+1) if (mHmin[i_node] > 1e20) KRATOS_THROW_ERROR ( std::logic_error,"hmin too big on node ",i_node+1) if (mHavg[i_node] > 1e20) KRATOS_THROW_ERROR ( std::logic_error,"havg too big on node ",i_node+1) } for (ModelPart::ElementsContainerType::iterator it=mr_model_part.ElementsBegin(); it!=mr_model_part.ElementsEnd(); it++) { if (it->Id() < 1) { KRATOS_THROW_ERROR (std::logic_error, "Element found with Id 0 or negative","") } double elem_vol = 0.0; if (TDim == 2) elem_vol = it->GetGeometry().Area(); else elem_vol = it->GetGeometry().Volume(); if (elem_vol <= 0) { std::cout << "error on element -> " << it->Id() << std::endl; KRATOS_THROW_ERROR (std::logic_error, "Area can not be lesser than 0","") } } KRATOS_CATCH ("") } void SetShockCapturingCoefficient (double coeff) { mshock_coeff = coeff; } void GatherValues() { KRATOS_TRY mr_matrix_container.FillScalarFromDatabase (VISCOSITY, 
mViscosity, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() ); mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes()); mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() ); mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() ); KRATOS_CATCH("") } //*************************************** //function to set adequate time step size double ComputeTimeStep (const double CFLNumber, const double MaxDt) { KRATOS_TRY //save the maximum time step max_dt = MaxDt; //local variable for time step size //getting value of current velocity and of viscosity // mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() ); // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); // mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes()); // // mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() ); // mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() ); // double delta_t_i = delta_t; 
//******************* //loop over all nodes int n_nodes = static_cast<int>(mvel_n1.size()); unsigned int n_proc = OpenMPUtils::GetNumThreads(); Vector dt_avg_vec(n_proc,1e10); Vector dt_vec(n_proc,1e10); Vector dt_avg_novisc_vec(n_proc,1e10); #pragma omp parallel for firstprivate(n_nodes) for (int i_node = 0; i_node < n_nodes; i_node++) { unsigned int my_id = OpenMPUtils::ThisThread(); double& delta_t = dt_vec[my_id]; double& mdelta_t_avg = dt_avg_vec[my_id]; double& delta_t_avg_novisc = dt_avg_novisc_vec[my_id]; const array_1d<double, TDim>& v_i = mvel_n1[i_node]; const double havg_i = mHavg[i_node]; const double hmin_i = mHmin[i_node]; const double eps_i = mEps[i_node]; //const double d_i = mD[i_node]; double nu = mViscosity[i_node]; // const double lindarcy_i = mA[i_node]; // const double nonlindarcy_i = mB[i_node]; double vel_norm = norm_2 (v_i); //double porosity_coefficient = ComputePorosityCoefficient(nu, vel_norm, eps_i, d_i); // double porosity_coefficient = ComputePorosityCoefficient( vel_norm, eps_i, lindarcy_i, nonlindarcy_i); vel_norm /= eps_i; //use CFL condition to compute time step size double delta_t_i = 1.0 / (vel_norm /hmin_i + nu / (hmin_i * hmin_i) /*+ porosity_coefficient*/); double delta_t_i_avg = 1.0 / (vel_norm /havg_i + nu / (havg_i * havg_i) /*+ porosity_coefficient*/); double delta_t_i_avg_novisc = 1.0 / (2.0 * vel_norm /havg_i ); //considering the most restrictive case of neighbor's velocities with similar direction but opposite sense. 
//loop over all neighbours for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const array_1d<double, TDim>& v_j = mvel_n1[j_neighbour]; double v_diff_norm = 0.0; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) { double temp = v_i[l_comp] - v_j[l_comp]; v_diff_norm += temp*temp; } v_diff_norm = sqrt (v_diff_norm); v_diff_norm /= eps_i; double delta_t_j = 1.0 / (v_diff_norm /havg_i + 4.0 * nu / (havg_i * havg_i) ); // double delta_t_j = 1.0 / (2.0 * v_diff_norm /hmin_i + 4.0 * nu / (hmin_i * hmin_i) ); double delta_t_j_avg_novisc = 1.0 / (2.0 * v_diff_norm /havg_i ); if (delta_t_j < delta_t_i) delta_t_i = delta_t_j; if (delta_t_j_avg_novisc < delta_t_i_avg_novisc) delta_t_i_avg_novisc = delta_t_j_avg_novisc; // if ((v_i_par >= 0.0 && v_j_par <= 0.0) || (v_i_par <= 0.0 && v_j_par >= 0.0)) // { // double delta_t_j = CFLNumber * 1.0 / (2.0 * norm_2(v_diff) /hmin_i + 4.0 * mViscosity / (hmin_i * hmin_i)); //// double delta_t_j = CFLNumber / ((fabs(v_i_par) + fabs(v_j_par)) / mHmin[i_node] + 2.0 * mViscosity / (mHmin[i_node] * mHmin[i_node])); // // KRATOS_WATCH(delta_t_j); // // KRATOS_WATCH(delta_t_i); // if (delta_t_j < delta_t_i) // delta_t_i = delta_t_j; // } } //choose the overall minimum of delta_t_i if (delta_t_i < delta_t) delta_t = delta_t_i; if (delta_t_i_avg < mdelta_t_avg) mdelta_t_avg = delta_t_i_avg; if (delta_t_i_avg_novisc < delta_t_avg_novisc) delta_t_avg_novisc = delta_t_i_avg_novisc; } //finalizing parallel computations double delta_t = dt_vec[0]; mdelta_t_avg = dt_avg_vec[0]; double delta_t_avg_novisc = dt_avg_novisc_vec[0]; for(unsigned int i=1; i<dt_vec.size(); i++) { if(delta_t > dt_vec[i]) delta_t = dt_vec[i]; if(mdelta_t_avg > dt_vec[i]) mdelta_t_avg = dt_avg_vec[i]; if(delta_t_avg_novisc > dt_vec[i]) 
delta_t_avg_novisc = dt_avg_novisc_vec[i]; } //take into account wall law in the estimation // int slip_size = mSlipBoundaryList.size(); // for (int i_slip = 0; i_slip < slip_size; i_slip++) // { // unsigned int i_node = mSlipBoundaryList[i_slip]; // double nu = mViscosity[i_node]; // // double delta_t_i = 0.25*mY_wall*mY_wall/nu; // // // Reducing wall friction for the large element near wall. Pooyan. // double reducing_factor = 1.00; // double h_min = mHavg[i_node]; // if(mY_wall < h_min) // reducing_factor = mY_wall / h_min; // delta_t_i /= reducing_factor; // // if (delta_t_i < delta_t) // delta_t = delta_t_i; // } // mdelta_t_avg = delta_t; //this should not be done ... remove it or decide what to do... delta_t_avg_novisc *= CFLNumber; // mnumsubsteps = ceil (delta_t_avg_novisc/delta_t); // mnumsubsteps += 1; //this is for security // delta_t *= CFLNumber; if (mnumsubsteps <= 1) { mnumsubsteps=1; delta_t_avg_novisc = delta_t; } //std::cout << "mdelta_t_avg =" << mdelta_t_avg <<std::endl; //std::cout << "delta_t =" << delta_t <<std::endl; //std::cout << "mnumsubsteps =" << mnumsubsteps <<std::endl; delta_t = delta_t_avg_novisc; // delta_t *= CFLNumber; //******************* //perform MPI syncronization of the dt (minimum should be kept) return delta_t; KRATOS_CATCH ("") } void ApplySmagorinsky (double MolecularViscosity, double Cs) { if (Cs != 0) { if (TDim == 3) ApplySmagorinsky3D (MolecularViscosity, Cs); else KRATOS_THROW_ERROR (std::logic_error,"smagorinsky not yet implemented in 2D",""); } } void UpdateFixedVelocityValues() { KRATOS_TRY //read velocity and pressure data from Kratos // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes); int fixed_size = mFixedVelocities.size(); #pragma omp parallel for firstprivate(fixed_size) for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++) { unsigned int i_node = mFixedVelocities[i_velocity]; array_1d<double, TDim>& 
u_i_fix = mFixedVelocitiesValues[i_velocity];
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
    u_i_fix[comp] = u_i[comp];
}

KRATOS_CATCH ("");
}

//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
//
// Integrates the (porous) momentum equation from t_n to t_n+1 with an
// explicit 4th-order Runge-Kutta scheme, split into mnumsubsteps+1 substeps.
// Beforehand it computes the stabilization times mTauPressure/mTauConvection
// and the convective projection mPi. A kinetic-energy safeguard compares the
// energy after integration with the initial one: if it grew by more than 50%
// the whole integration is repeated once with 10x as many substeps.
void SolveStep1()
{
    KRATOS_TRY

    //PREREQUISITES

    //variables for node based data handling
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();

    //storage of nodal values in local variables
    CalcVectorType rhs;
    rhs.resize (n_nodes);

    //read velocity and pressure data from Kratos
    // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
    // mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, rNodes);
    // mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, rNodes);
    // mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, rNodes);
    // mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, rNodes);
    mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
    // mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() );
    mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() );
    mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() );

    //read time step size from Kratos
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];

    //compute intrinsic time
    double time_inv_avg = 1.0/mdelta_t_avg;
    // if(mmax_dt < mdelta_t_avg) mmax_dt = mdelta_t_avg;
    // double time_inv_avg = 1.0/mmax_dt;

    double stabdt_pressure_factor = mstabdt_pressure_factor;
    double stabdt_convection_factor = mstabdt_convection_factor;
    //double tau2_factor = mtau2_factor;

    //compute the nodal stabilization times tau (pressure) and tau_conv (convection)
    #pragma omp parallel for firstprivate(time_inv_avg,stabdt_pressure_factor,stabdt_convection_factor)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        // double& h_i = mHavg[i_node];
        double& h_avg_i = mHavg[i_node];
        double& h_min_i = mHmin[i_node];

        array_1d<double, TDim>& a_i = mvel_n1[i_node];
        const double nu_i = mViscosity[i_node];
        const double eps_i = mEps[i_node];
        //const double d_i = mD[i_node];
        const double lindarcy_i = mA[i_node];
        const double nonlindarcy_i = mB[i_node];

        double vel_norm = norm_2 (a_i);
        //double porosity_coefficient = ComputePorosityCoefficient(nu_i, vel_norm, eps_i, d_i);
        double porosity_coefficient = ComputePorosityCoefficient (vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
        vel_norm /= eps_i; //intrinsic velocity

        double tau = 1.0 / (2.0 * vel_norm / h_min_i + stabdt_pressure_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
        double tau_conv = 1.0 / (2.0 * vel_norm / h_min_i + stabdt_convection_factor*time_inv_avg );
        mTauPressure[i_node] = tau;
        mTauConvection[i_node] = tau_conv;
        // mTau2[i_node] = (nu_i + h_avg_i*vel_norm*0.5) *tau2_factor;
    }

    // //smoothen the tau press - mTau2 used as temp var
    // #pragma omp parallel for
    // for (int i_node = 0; i_node < n_nodes; i_node++)
    // {
    //     double& tau = mTau2[i_node]; //******************
    //     tau = mTauPressure[i_node];
    //     double counter = 1.0;
    //     //const double& p_i = pressure[i_node];
    //     for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
    //     {
    //         unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
    //         tau += mTauPressure[j_neighbour];
    //         counter+=1.0;
    //     }
    //     tau/=counter;
    // }
    //
    // mTauPressure = mTau2;

    //calculating the convective projection mPi = M^-1 * (a . grad)U
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& pi_i = mPi[i_node]; //******************

        //setting to zero
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            pi_i[l_comp] = 0.0;

        array_1d<double, TDim> a_i = mvel_n1[i_node];
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];
        const double& eps_i = mEps[i_node];
        a_i /= eps_i; //convection is done with the intrinsic velocity

        //const double& p_i = pressure[i_node];
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];

            array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
            const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
            const double& eps_j = mEps[j_neighbour];
            a_j /= eps_j;

            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

            edge_ij.Add_ConvectiveContribution (pi_i, a_i, U_i, a_j, U_j);
            // edge_ij.Add_grad_p(pi_i, p_i, p_j);
        }
        // const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
        // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //     pi_i[l_comp] *= m_inv;
    }

    //inlet/outlet boundary contribution to the projection
    int inout_size = mInOutBoundaryList.size();
    //#pragma omp parallel for firstprivate(slip_size)
    for (int i = 0; i < inout_size; i++)
    {
        unsigned int i_node = mInOutBoundaryList[i];
        // double dist = mdistances[i_node];
        // if (dist <= 0.0)
        // {
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];
        const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
        double projection_length = 0.0;
        //double Ain = 0.0;
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            projection_length += U_i[comp] * an_i[comp];
        }

        array_1d<double, TDim>& pi_i = mPi[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            pi_i[comp] += projection_length * U_i[comp] ;
        // }
    }

    //scale the projection by the inverted lumped mass (done after the
    //boundary contribution above has been assembled)
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& pi_i = mPi[i_node];
        const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            pi_i[l_comp] *= m_inv;
    }

    // //completing with boundary integrals
    // //loop over all faces
    // for (ModelPart::ConditionsContainerType::iterator cond_it = mr_model_part.ConditionsBegin(); cond_it != mr_model_part.ConditionsEnd(); cond_it++)
    // {
    //     //get geometry data of the face
    //     Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
    //
    //     //reference for area normal of the face
    //     array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
    //     double A = norm_2(face_normal);
    //
    //     unsigned int i_node0 = static_cast<unsigned int> (face_geometry[0].FastGetSolutionStepValue(AUX_INDEX));
    //     unsigned int i_node1 = static_cast<unsigned int> (face_geometry[1].FastGetSolutionStepValue(AUX_INDEX));
    //     unsigned int i_node2 = static_cast<unsigned int> (face_geometry[2].FastGetSolutionStepValue(AUX_INDEX));
    //
    //     if(face_geometry[0].IsFixed(VELOCITY_X) && face_geometry[1].IsFixed(VELOCITY_X) && face_geometry[2].IsFixed(VELOCITY_X))
    //     {
    //         //KRATOS_WATCH(cond_it->Id());
    //         // if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == false)
    //         //{
    //         const array_1d<double,TDim>& v_0 = mvel_n1[i_node0];
    //         const array_1d<double,TDim>& v_1 = mvel_n1[i_node1];
    //         const array_1d<double,TDim>& v_2 = mvel_n1[i_node2];
    //         double An0 = inner_prod(v_0,face_normal) / (A*mEps[i_node0]);
    //         double An1 = inner_prod(v_1,face_normal) / (A*mEps[i_node1]);
    //         double An2 = inner_prod(v_2,face_normal) / (A*mEps[i_node2]);
    //         //KRATOS_WATCH(face_normal);
    //         mPi[i_node0] -= ((2.0*An0+An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
    //         mPi[i_node1] -= ((An0+2.0*An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
    //         mPi[i_node2] -= ((An0+An1+2.0*An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
    //     }
    //     //}
    // }
    //
    // //calculating the convective projection
    // #pragma omp parallel for
    // for (int i_node = 0; i_node < n_nodes; i_node++)
    // {
    //     array_1d<double, TDim>& pi_i = mPi[i_node]; //******************
    //     const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
    //     for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
    //         pi_i[l_comp] *= m_inv;
    // }

    // KRATOS_WATCH("step1 before rk loop")
    // KRATOS_WATCH(mnumsubsteps)
    // KRATOS_WATCH(mPn)
    // KRATOS_WATCH(mPn1)
    // KRATOS_WATCH(mPi)
    // KRATOS_WATCH(mvel_n1)
    // KRATOS_WATCH(mvel_n)

#ifdef DEBUG_OUTPUT
    KRATOS_WATCH("before RK of step1 - new")
    double aux_v=0.0;
    for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
        aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
    double aux_oldv=0.0;
    for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
        aux_oldv += inner_prod(mvel_n[i_node],mvel_n[i_node]);
    double aux_pi=0.0;
    for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
        aux_pi += inner_prod(mPi[i_node],mPi[i_node]);

    KRATOS_WATCH(inner_prod(mPn,mPn));
    KRATOS_WATCH(aux_v);
    KRATOS_WATCH(aux_oldv);
    KRATOS_WATCH(aux_pi);
    KRATOS_WATCH(inner_prod(mdistances,mdistances));
    KRATOS_WATCH(inner_prod(mViscosity,mViscosity));
#endif

    //keep a copy of the velocity at step n, so it can be restored after the
    //substepping loop (the loop advances mvel_n substep by substep)
    CalcVectorType auxn = mvel_n;

    double n_substeps = mnumsubsteps+1;

    double reduced_it = 0;
    double energy_initial = 0.0;
    double energy_final = 1.0;

    //compute initial kinetic energy
    #pragma omp parallel for firstprivate(n_nodes) reduction(+:energy_initial)
    for (int i_node = 0; i_node < n_nodes; i_node++)
        if (mdistances[i_node] <= 0.0)
            energy_initial += mr_matrix_container.GetLumpedMass()[i_node] * inner_prod(mvel_n[i_node],mvel_n[i_node]);

    //KRATOS_WATCH(energy_initial)
    // KRATOS_WATCH(n_substeps)

    //at most two attempts: if the energy check below fails the integration
    //is redone once with 10x as many substeps
    while(reduced_it++ < 2 )
    {
        double delta_t_substep = delta_t/n_substeps;
        for (unsigned int substep = 0; substep<n_substeps; substep++)
        {
            //std::cout << "substep " << substep+1 << " of " << n_substeps << std::endl;
            mr_matrix_container.AssignVectorToVector (mvel_n, mWork); //mWork = mvel_n

            //first step of Runge Kutta
            mr_matrix_container.AssignVectorToVector (mvel_n, mvel_n1); //mvel_n1 = mvel_n

            mr_matrix_container.SetToZero (rhs);
            CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
            Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness,rhs);
            Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t_substep, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
            ApplyVelocityBC (mvel_n1);

            //second step
            mr_matrix_container.SetToZero (rhs);
            CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
            Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
            Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t_substep, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
            ApplyVelocityBC (mvel_n1);

            //third step
            mr_matrix_container.SetToZero (rhs);
            CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
            Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
            Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, delta_t_substep, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
            ApplyVelocityBC (mvel_n1);

            //fourth step
            mr_matrix_container.SetToZero (rhs);
            CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
            Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);

            //compute right-hand side
            mr_matrix_container.AssignVectorToVector (mWork, mvel_n1);
            ApplyVelocityBC (mvel_n1);

            //prepare for next step
            mr_matrix_container.AssignVectorToVector (mvel_n1, mvel_n);
        }

        energy_final = 0.0;
        //compute final kinetic energy for the stability safeguard
        #pragma omp parallel for firstprivate(n_nodes) reduction(+:energy_final)
        for (int i_node = 0; i_node < n_nodes; i_node++)
            if (mdistances[i_node] <= 0.0)
                energy_final += mr_matrix_container.GetLumpedMass()[i_node] * inner_prod(mvel_n1[i_node],mvel_n1[i_node]);

        //put back the original velocity at step n
        mr_matrix_container.AssignVectorToVector (auxn, mvel_n);

        if(energy_final < 1.5*energy_initial)
            break;
        else
            n_substeps*=10;

        if(reduced_it > 1)
        {
            KRATOS_WATCH(energy_initial)
            KRATOS_WATCH(energy_final)
            KRATOS_WATCH(n_substeps)
        }
    }

    // mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() );

    // KRATOS_WATCH("end of step1")
    // KRATOS_WATCH(mvel_n1)
    // KRATOS_WATCH(mvel_n)

#ifdef DEBUG_OUTPUT
    KRATOS_WATCH("end of step1 - new")
    aux_v=0.0;
    for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
        aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
    double aux_xi=0.0;
    for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
        aux_xi += inner_prod(mXi[i_node],mXi[i_node]);

    KRATOS_WATCH(inner_prod(mPn,mPn));
    KRATOS_WATCH(inner_prod(mdistances,mdistances));
    KRATOS_WATCH(inner_prod(mViscosity,mViscosity));
    KRATOS_WATCH(aux_v);
    KRATOS_WATCH(aux_xi);
#endif

    KRATOS_CATCH ("")
}

//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
//
// Assembles, edge by edge, the momentum residual into rhs (body force,
// convection, pressure gradient, viscous term and convection stabilization)
// and the Darcy/porosity resistance into diag_stiffness. Only nodes with
// distance <= 0 (inside the fluid) are assembled.
void CalculateRHS (
    const CalcVectorType& vel,
    const ValuesVectorType& pressure,
    const CalcVectorType& convective_velocity,
    CalcVectorType& rhs,
    ValuesVectorType& diag_stiffness)
{
    KRATOS_TRY

    int n_nodes = vel.size();

    //perform MPI syncronization

    //calculating the RHS
    array_1d<double, TDim> stab_low;
    array_1d<double, TDim> stab_high;
    double inverse_rho = 1.0 / mRho;

    #pragma omp parallel for private(stab_low,stab_high)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double dist = mdistances[i_node];
        if (dist <= 0.0) //node is inside domain ---- if outside do nothing
        {
            const double nu_i = mViscosity[i_node];
            const double nu_j = nu_i;
            array_1d<double, TDim>& rhs_i = rhs[i_node];
            const array_1d<double, TDim>& f_i = mBodyForce;
            array_1d<double, TDim> a_i = convective_velocity[i_node];
            const array_1d<double, TDim>& U_i = vel[i_node];
            const array_1d<double, TDim>& pi_i = mPi[i_node];
            const double& p_i = pressure[i_node];
            const double& eps_i = mEps[i_node];
            const double lindarcy_i = mA[i_node];
            const double nonlindarcy_i = mB[i_node];

            double edge_tau = mTauConvection[i_node];

            a_i /= eps_i; //intrinsic convective velocity

            //initializing with the external forces (e.g. gravity)
//start from the body-force contribution, scaled by mass and porosity
double& m_i = mr_matrix_container.GetLumpedMass() [i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
    rhs_i[comp] = m_i * eps_i * f_i[comp] ;

//applying the effect of the porosity
double porosity_coefficient = ComputePorosityCoefficient ( norm_2 (U_i), eps_i, lindarcy_i, nonlindarcy_i);
diag_stiffness[i_node]= m_i * porosity_coefficient;

//std::cout << i_node << "rhs =" << rhs_i << "after adding body force" << std::endl;

//convective term
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];

    array_1d<double, TDim> a_j = convective_velocity[j_neighbour];
    const array_1d<double, TDim>& U_j = vel[j_neighbour];
    const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
    const double& p_j = pressure[j_neighbour];
    const double& eps_j = mEps[j_neighbour];
    // const double& beta_j = mBeta[j_neighbour];

    a_j /= eps_j;

    CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

    edge_ij.Sub_ConvectiveContribution (rhs_i, a_i, U_i, a_j, U_j);
    //std::cout << i_node << "rhs =" << rhs_i << "after convective contrib" << std::endl;

    //take care! we miss including a B.C. for the external pressure
    //edge_ij.Add_Gp (rhs_i,p_i*inverse_rho,p_j*inverse_rho);
    edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho*eps_i, p_j * inverse_rho*eps_i);
    // edge_ij.Add_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
    //std::cout << i_node << "rhs =" << rhs_i << "after Gp" << std::endl;

    edge_ij.Sub_ViscousContribution (rhs_i, U_i, nu_i, U_j, nu_j);
    // edge_ij.Add_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
    //std::cout << i_node << "rhs =" << rhs_i << "after viscous" << std::endl;

    //add stabilization
    edge_ij.CalculateConvectionStabilization_LOW (stab_low, a_i, U_i, a_j, U_j);
    // edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i,p_i, a_j, U_j,p_j);
    edge_ij.CalculateConvectionStabilization_HIGH (stab_high, a_i, pi_i, a_j, pi_j);

    // double beta = 1.0;
    // double beta = beta_i;
    // if(beta_j > beta)
    //     beta = beta_j;
    // beta = 1.0;
    // edge_ij.Sub_StabContribution(rhs_i, edge_tau*beta, 1.0, stab_low, stab_high);
    // edge_ij.Sub_StabContribution(rhs_i, edge_tau, (1.0-beta), stab_low, stab_high);
    edge_ij.Sub_StabContribution (rhs_i, edge_tau, 1.0, stab_low, stab_high);
}
// std::cout << i_node << "rhs =" << rhs_i << std::endl;
        }
    }

    //inlet/outlet boundary contribution to the momentum rhs
    int inout_size = mInOutBoundaryList.size();
    //#pragma omp parallel for firstprivate(slip_size)
    for (int i = 0; i < inout_size; i++)
    {
        unsigned int i_node = mInOutBoundaryList[i];
        // double dist = mdistances[i_node];
        // if (dist <= 0.0)
        // {
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];
        const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
        double projection_length = 0.0;
        double Ain = 0.0;   //NOTE(review): computed but not used below
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            projection_length += U_i[comp] * an_i[comp];
            Ain += an_i[comp]*an_i[comp];
        }

        array_1d<double, TDim>& rhs_i = rhs[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            rhs_i[comp] += projection_length * U_i[comp] ;
        // }
    }

    /*
    for (int i = 0; i < mSlipBoundaryList.size(); i++)
    {
        int i_node = mSlipBoundaryList[i];
        double dist = mdistances[i_node];
        if (dist <= 0.0) //node is inside domain ---- if outside do nothing
        {
            const double& p_i = pressure[i_node];
            const array_1d<double,3>& Ani = mSlipNormal[i_node];
            array_1d<double, TDim>& rhs_i = rhs[i_node];

            array_1d<double, TDim> temp;
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                temp[l_comp] = 0.0;

            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                if(mdistances[j_neighbour] <= 0.0 && mis_slip[j_neighbour] == true)
                {
                    //const double& p_j = pressure[j_neighbour];
                    array_1d<double,3> Anj = mSlipNormal[j_neighbour];
                    Anj /= norm_2(Anj);
                    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                        temp[l_comp] += p_i*Anj[l_comp];
                }
            }

            //take out part in the direction of Ani
            double Ai = norm_2(Ani);
            double aux = 0.0;
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                aux += temp[l_comp]*Ani[l_comp];
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                temp[l_comp] -= aux *Ani[l_comp] / (Ai*Ai);

            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                rhs_i[l_comp] -= 0.25*Ai*temp[l_comp];
        }
    }
    */
    // KRATOS_WATCH("finished**************************************************") */

    /*
    //correction to the pressure graient
    //loop over all faces
    CalcVectorType press_correction(vel.size());
    mr_matrix_container.SetToZero(press_correction);
    // mr_matrix_container.SetToZero(slip_area);
    for (ModelPart::ConditionsContainerType::iterator cond_it = mr_model_part.ConditionsBegin(); cond_it != mr_model_part.ConditionsEnd(); cond_it++)
    {
        //get geometry data of the face
        Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();

        //reference for area normal of the face
        array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
        double A = norm_2(face_normal);

        unsigned int i_node0 = static_cast<unsigned int> (face_geometry[0].FastGetSolutionStepValue(AUX_INDEX));
        unsigned int i_node1 = static_cast<unsigned int> (face_geometry[1].FastGetSolutionStepValue(AUX_INDEX));
        unsigned int i_node2 = static_cast<unsigned int> (face_geometry[2].FastGetSolutionStepValue(AUX_INDEX));

        if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == true)
        {
            const double& p_0 = pressure[i_node0];
            const double& p_1 = pressure[i_node1];
            const double& p_2 = pressure[i_node2];

            //TODO: we should only keep the part orthogonal to the external normal on each node!!!!
            press_correction[i_node0] -= ((2.0*p_0+p_1+p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
            press_correction[i_node1] -= ((p_0+2.0*p_1+p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
            press_correction[i_node2] -= ((p_0+p_1+2.0*p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
        }
        else
        {
            const array_1d<double,TDim>& v_0 = vel[i_node0];
            const array_1d<double,TDim>& v_1 = vel[i_node1];
            const array_1d<double,TDim>& v_2 = vel[i_node2];
            double An0 = inner_prod(v_0,face_normal) / (A*A);
            double An1 = inner_prod(v_1,face_normal) / (A*A);
            double An2 = inner_prod(v_2,face_normal) / (A*A);

            rhs[i_node0] -= ((2.0*An0+An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
            rhs[i_node1] -= ((An0+2.0*An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
            rhs[i_node2] -= ((An0+An1+2.0*An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
        }
    }

    //slip condition
    int slip_size = mSlipBoundaryList.size();
    #pragma omp parallel for firstprivate(slip_size)
    for (int i_slip = 0; i_slip < slip_size; i_slip++)
    {
        unsigned int i_node = mSlipBoundaryList[i_slip];
        double dist = mdistances[i_node];
        if (dist <= 0.0 && mis_slip[i_node] == true)
        {
            array_1d<double, TDim>& rhs_i = rhs[i_node];
            // array_1d<double, TDim>& an_i = mSlipNormal[i_node];
            // double normalization = 0.0;
            // for (unsigned int comp = 0; comp < TDim; comp++)
            // {
            //     normalization += an_i[comp] * an_i[comp];
            // }
            // normalization = sqrt(normalization);
            array_1d<double,TDim>& press_corr_i = press_correction[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                rhs_i[comp] += press_corr_i[comp];
            //we should remove here the normal component!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        }
    }
    */

    //apply wall resistance
    if (mWallLawIsActive == true)
        ComputeWallResistance (vel,diag_stiffness);

    // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    // mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, rNodes);

    KRATOS_CATCH ("")
}

//*************************************************************************
//function to solve fluid equations - fractional step 2: calculate pressure
//
// Builds and solves the pressure Poisson problem. First the free-surface
// "layers" are rebuilt (layer 1 = inner nodes touching the interface,
// layer 2 = their outside neighbours) and a pressure estimate is
// extrapolated on the first outside layer so that p ~ 0 on the free surface.
int SolveStep2 (typename TLinearSolver::Pointer pLinearSolver)
{
    KRATOS_TRY

    // typedef Node < 3 > PointType;
    // typedef PointerVector<PointType > PointVector;
    // typedef PointVector::iterator PointIterator;

    //reset the visited flags
    #pragma omp parallel for
    for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
        mis_visited[i_node] = 0;

    int layer_counter = -1;
    boost::numeric::ublas::vector<int> layers(mr_model_part.Nodes().size());
    boost::numeric::ublas::vector<int> layer_limits(3);

    //Re-generate a container with LAYER 0 and LAYER 1 after convection of the free surface
    layer_limits[0] = 0;

    #pragma omp parallel for
    for (int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
    {
        if(mdistances[i_node] < 0.0)
        {
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                //get global index of neighbouring node j
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                if(mdistances[j_neighbour] >= 0.0 && mis_visited[i_node] == 0 )
                {
                    //the critical section guards only the shared counter +
                    //slot write; mis_visited[i_node] is written outside it,
                    //which is safe because each thread touches its own i_node
                    #pragma omp critical
                    layers[++layer_counter] = i_node;

                    mis_visited[i_node] = 1;
                    break;
                }
            }
        }
        else
            mPn1[i_node] = 0.0;
    }
    //NOTE(review): the loop above fills layers[] via pre-increment
    //(first entry at index 0), so layer_counter == count-1 here; the loop
    //below therefore skips the last layer-1 node and the extension phase
    //starts by overwriting layers[layer_counter]. Looks like an off-by-one —
    //confirm intended before changing.
    layer_limits[1] = layer_counter;

    for(unsigned int i=0; i<static_cast<unsigned int>(layer_limits[1]); i++)
    {
        unsigned int i_node = layers[i];
        for (unsigned int csr_index =
mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            if( mdistances[j_neighbour] >= 0.0 && mis_visited[j_neighbour] == 0)
            {
                //NOTE(review): post-increment here vs pre-increment in the
                //seeding phase — the first write lands on the index of the
                //last seeded node; verify against the seeding loop.
                layers[layer_counter++] = j_neighbour;
                mis_visited[j_neighbour] = 2;
            }
        }
    }
    layer_limits[2] = layer_counter;

    int return_value = 0;

    //on the first layer outside the pressure is set to a value such that on the free surface the pressure is approx 0
    #pragma omp parallel for
    for( int iii=static_cast<int>(layer_limits[1]); iii<static_cast<int>(layer_limits[2]); iii++)
    {
        unsigned int i_node = layers[iii];

        //compute the nodal gradient of the distance function
        array_1d<double, TDim> grad_d;
        for (unsigned int comp = 0; comp < TDim; comp++)
            grad_d[comp] = 0.0;

        double dist_i = mdistances[i_node];

        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            //get global index of neighbouring node j
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];

            const double& dist_j = mdistances[j_neighbour];

            //projection of pressure gradients
            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

            edge_ij.Add_grad_p (grad_d, dist_i, dist_j);
        }

        const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node];
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            grad_d[l_comp] *= m_inv;

        double norm_grad = norm_2 (grad_d);

        //a well-resolved signed distance has |grad d| ~ 1; accept up to 2
        if (norm_grad < 2.0)
        {
            //clamp the distance to a sane multiple of the local mesh size
            if(dist_i < 0.01*mHavg[i_node] )
                dist_i = 0.0;
            else if(dist_i > 2.0*mHavg[i_node] )
            {
                KRATOS_WATCH("distance is much larger than expected!!")
                dist_i = 2.0*mHavg[i_node];
            }

            if(norm_grad > 0.001)
            {
                grad_d /= norm_grad; //this is the direction of the gradient of the distances
                grad_d *= dist_i; //this is the vector with the distance of node_i from the closest point on the free surface
            }
            else
            {
                KRATOS_WATCH("norm grad is very small!!!!")
                grad_d *= 0.0;
            }

            //extrapolate p from the free surface using the pressure gradient projection
            const array_1d<double, TDim>& press_grad = mXi[i_node]; //iii->FastGetSolutionStepValue (PRESS_PROJ);
            double pestimate = inner_prod (press_grad,grad_d);
            mPn1[i_node] = pestimate;

            // KRATOS_WATCH("peastimate step2")
            // KRATOS_WATCH(iii->Id())
            // KRATOS_WATCH(grad_d)
            // KRATOS_WATCH(press_grad)
            // KRATOS_WATCH(pestimate)
        }
        else
        {
            //distance field is distorted here: fall back to averaging the
            //pressure of the layer-1 neighbours and flag the failure
            std::cout << "attention gradient of distance much greater than 1 on node:" << i_node <<std::endl;
            return_value = -1;
            // return -1;

            double avg_number = 0.0;
            double pavg = 0.0;

            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                if( mis_visited[j_neighbour] == 1)
                {
                    pavg += mPn1[j_neighbour];
                    avg_number += 1.0;
                }
            }

            if (avg_number == 0)
                KRATOS_THROW_ERROR (std::logic_error,"can not happen that the extrapolation node has no neighbours","");

            mPn1[i_node] = pavg/avg_number;
        }
    }

    //if a node is very close to the free surface (relatively to the element size) fix the pressure on it
    // for(ModelPart::NodesContainerType::iterator iii = mr_model_part.NodesBegin(); iii!=mr_model_part.NodesEnd(); iii++)
    // {
    //     unsigned int i_node = iii->FastGetSolutionStepValue(AUX_INDEX);
    //
    //     double dist = mdistances[i_node];
    //     if(dist > 0.0 && dist < 0.01*mHavg[i_node])
    //         iii->FastGetSolutionStepValue(PRESSURE) = 0.0;
    //
    // }

    //PREREQUISITES

    //allocate memory for variables
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();

    //unknown and right-hand side vector
    TSystemVectorType dp, rhs;
    dp.resize (n_nodes);
    rhs.resize (n_nodes);

    array_1d<double, TDim> dU_i, dU_j, work_array;

    //read time step size from Kratos
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];

#ifdef _OPENMP
    // double time_inv = 0.0; //1.0/delta_t;
    //read the pressure projection from the database
#endif

    // mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() );
    // mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() );
    // mr_matrix_container.FillVectorFromDatabase (PRESS_PROJ, mXi, rNodes);
    // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);

    //for (int i_node = 0; i_node < n_nodes; i_node++)
    //    std::cout << mvel_n1[i_node] << std::endl;

    //assemble the stabilized pressure laplacian mL and the rhs, node by node
    // double rho_inv = 1.0 / mRho;
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double& rhs_i = rhs[i_node];
        rhs_i = 0.0;
        const double& p_i = mPn1[i_node];
        const double& p_old_i = mPn[i_node];
        const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
        // const double& eps_i = mEps[i_node];

        array_1d<double, TDim>& xi_i = mXi[i_node];

        double l_ii = 0.0;
        // double div_i = 0.0;

        //loop over all neighbours
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
            const double& p_j = mPn1[j_neighbour];
            const double& p_old_j = mPn[j_neighbour];
            const array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
            const array_1d<double, TDim>& xi_j = mXi[j_neighbour];
            // const double& eps_j = mEps[j_neighbour];

            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

#ifdef SYMM_PRESS
            double edge_tau = 0.25* (mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
            double edge_tau = 0.5*mTauPressure[i_node];
#endif
            // double edge_tau = CalculateEdgeTau(time_inv,h_i,a_i,h_j,a_j);
            // if (edge_tau < delta_t) edge_tau=delta_t;

            //compute laplacian operator
            double sum_l_ikjk;
            edge_ij.CalculateScalarLaplacian (sum_l_ikjk);

            // double sum_l_ikjk_onlystab = sum_l_ikjk * (edge_tau);
            double sum_l_ikjk_onlydt = sum_l_ikjk * (delta_t);
            sum_l_ikjk *= (delta_t + edge_tau);

            //assemble right-hand side
            //pressure contribution
            // rhs_i -= sum_l_ikjk_onlystab * (p_j - p_i);
            rhs_i -= sum_l_ikjk * (p_j - p_i);
            rhs_i += sum_l_ikjk_onlydt * (p_old_j - p_old_i);

            //calculating the divergence of the fract vel
            // edge_ij.Sub_D_v(div_i, U_i_curr*mRho*eps_i, U_j_curr * mRho*eps_j);
            edge_ij.Sub_D_v (rhs_i, U_i_curr*mRho, U_j_curr * mRho);
            // edge_ij.Sub_D_v(rhs_i,a_i*rho_i,a_j*rho_i);

            //high order stabilizing term
            double temp = 0.0;
            // edge_ij.Add_div_v(temp,mTauPressure[i_node]*xi_i,mTauPressure[j_neighbour]*xi_j);
            edge_ij.Add_div_v (temp, xi_i, xi_j);
            rhs_i += edge_tau * temp;

            //assemble laplacian matrix
            mL (i_node, j_neighbour) = sum_l_ikjk;
            l_ii -= sum_l_ikjk;
        }

        // //area correction to prevent mass loss
        // rhs_i -= mdiv_error[i_node];
        // rhs_i += div_i * eps_i;

        mL (i_node, i_node) = l_ii;
    }

    //optional mass-conservation correction using the accumulated divergence error
    if (muse_mass_correction == true)
    {
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& rhs_i = rhs[i_node];
            rhs_i -= mdiv_error[i_node];
        }
    }

    //find the max diagonal term (used for penalization of Dirichlet rows)
    double max_diag = 0.0;
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double L_diag = mL (i_node, i_node);
        if (fabs (L_diag) > fabs (max_diag) )
            max_diag = L_diag;
    }
    max_diag *= 1e10;
    // if (max_diag < 1e20) max_diag=1e20;

    //respect pressure boundary conditions by penalization
    // double huge = max_diag * 1e6;
    // for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
    // {
    //     unsigned int i_node = mPressureOutletList[i_pressure];
    //     mL(i_node, i_node) = huge;
    //     rhs[i_node] = 0.0;
    // }
    for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
    {
        unsigned int i_node = mPressureOutletList[i_pressure];
        mL (i_node, i_node) = max_diag;
        rhs[i_node] = 0.0;
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
            mL (i_node, j_neighbour) = 0.0;
        }
    }

    //modification for level_set: fix p = 0 on nodes outside the fluid
    // mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
    // for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
    // {
    //     if(mdistances[i_dist] >= 0)
    //     {
    //         mL(i_dist, i_dist) = huge;
    //         rhs[i_dist] = 0.0;
    //     }
    // }
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        if (mdistances[i_node] >= 0)
        {
            mL (i_node, i_node) = max_diag;
            rhs[i_node] = 0.0;
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                mL (i_node, j_neighbour) = 0.0;
            }
        }
        else
        {
            //decouple inner rows from the fixed outside nodes
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                if (mdistances[j_neighbour] >= 0)
                    mL (i_node, j_neighbour) = 0.0;
            }
        }
    }

    // for (int i_node = 0; i_node < n_nodes; i_node++)
    // {
    //     if( fabs(mL(i_node, i_node)) < 1e-20)
    //     {
    //         mL(i_node, i_node)=max_diag;
    //         rhs[i_node] = 0.0;
    //         KRATOS_WATCH("arghhhhhhhhhhhhhhhhhhhhhhhhhhhhhh");
    //     }
    // }

    //compute row scaling factors (symmetric diagonal scaling of mL)
    TSystemVectorType scaling_factors (n_nodes);
    double* Lvalues = mL.value_data().begin();
    SizeType* Lrow_indices = mL.index1_data().begin();
    SizeType* Lcol_indices = mL.index2_data().begin();

    #pragma omp parallel for
    for (int k = 0; k < static_cast< int> (mL.size1() ); k++)
    {
        double t = 0.0;
        SizeType col_begin = Lrow_indices[k];
        SizeType col_end = Lrow_indices[k+1];

        //find the diagonal entry of row k in the CSR storage
        for (SizeType j=col_begin; j<col_end; j++)
            if ( static_cast<int> (Lcol_indices[j]) == k)
            {
                t = fabs (Lvalues[j]);
                break;
            }
        // t += Lvalues[j]*Lvalues[j];
        // t = sqrt(t);
        scaling_factors[k] = 1.0/sqrt (t);
    }

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int> (mL.size1() ); k++)
    {
        SizeType col_begin = Lrow_indices[k];
        SizeType col_end = Lrow_indices[k+1];
        double k_factor = scaling_factors[k];

        rhs[k] *= k_factor;
        for
(SizeType j=col_begin; j<col_end; j++) { Lvalues[j] *= scaling_factors[Lcol_indices[j]] * k_factor; } } //set starting vector for iterative solvers #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) dp[i_node] = 0.0; //KRATOS_WATCH(rhs); //solve linear equation system L dp = rhs pLinearSolver->Solve (mL, dp, rhs); //KRATOS_WATCH(*pLinearSolver) //update pressure #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) mPn1[i_node] += dp[i_node]*scaling_factors[i_node]; // for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) // { // unsigned int i_node = mPressureOutletList[i_pressure]; // mPn1[i_node] = mPressureOutlet[i_pressure]; // } //write pressure and density to Kratos mr_matrix_container.WriteScalarToDatabase (PRESSURE, mPn1, rNodes); //compute pressure proj for the next step #pragma omp parallel for private(work_array) for (int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& xi_i = mXi[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) xi_i[comp] = 0.0; double dist = mdistances[i_node]; if (dist <= 0.0) //node is inside domain ---- if outside do nothing { const double& p_i = mPn1[i_node]; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const double& p_j = mPn1[j_neighbour]; //projection of pressure gradients CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; edge_ij.Add_grad_p (xi_i, p_i, p_j); } const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) xi_i[l_comp] *= m_inv; } } mr_matrix_container.WriteVectorToDatabase (PRESS_PROJ, mXi, rNodes); // KRATOS_WATCH("end of step2") // KRATOS_WATCH(mPn) // KRATOS_WATCH(mPn1) // KRATOS_WATCH(mXi) 
#ifdef DEBUG_OUTPUT
        // Debug-only checksums of the end-of-step-2 state (velocity, pressure, pressure projection).
        KRATOS_WATCH("end of step2 - new")
        double aux_v=0.0;
        for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
            aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
        double aux_xi=0.0;
        for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
            aux_xi += inner_prod(mXi[i_node],mXi[i_node]);
        KRATOS_WATCH(inner_prod(mPn1,mPn1));
        KRATOS_WATCH(aux_v);
        KRATOS_WATCH(aux_xi);
#endif

        return return_value;

        KRATOS_CATCH ("")
    }

    //**********************************************************************************
    //function to solve fluid equations - fractional step 3: correct fractional momentum
    //
    // Corrects the fractional-step velocity of every interior node (mdistances < 0)
    // with the edge-assembled gradient of the pressure increment (mPn1 - mPn) / mRho.
    // The increment is scaled by 0.5 unless massume_constant_dp is set (then 1.0).
    // Afterwards the nodal acceleration macc is recomputed from (v^{n+1}-v^n)/dt and,
    // if muse_mass_correction is enabled, the divergence error of the corrected
    // velocity field is accumulated into mdiv_error for the next pressure solve.
    // Finally the corrected velocity is written back to the Kratos database.
    void SolveStep3()
    {
        KRATOS_TRY

        //get number of nodes
        ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
        int n_nodes = rNodes.size();

        //define work array
        array_1d<double, TDim> correction;

        //read time step size from Kratos
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        double delta_t = CurrentProcessInfo[DELTA_TIME];

        double factor = 0.5;
        if (massume_constant_dp == true)
            factor = 1.0;

        //compute end of step momentum
        double rho_inv = 1.0 / mRho;
        #pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv,factor)
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double dist = mdistances[i_node];
            if (dist < 0.0) //node is inside domain ---- if outside do nothing
            {
                array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
                double delta_p_i = (mPn1[i_node] - mPn[i_node]) * rho_inv*factor;
                // const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];

                //setting to zero
                for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                    correction[l_comp] = 0.0;

                //compute edge contributions dt*M^(-1)Gp
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                    double delta_p_j = (mPn1[j_neighbour] - mPn[j_neighbour]) * rho_inv*factor;
                    CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
                    // edge_ij.Sub_grad_p(correction,delta_p_i,delta_p_j);
                    edge_ij.Sub_grad_p(correction, delta_p_i, delta_p_j);
                    // edge_ij.Add_grad_p(correction, delta_p_i, delta_p_j);
                    //edge_ij.Add_Gp (correction,delta_p_i,delta_p_j);
                    // edge_ij.Sub_Gp(correction,delta_p_i,delta_p_j);
                }

                //compute prefactor
                // double coefficient = delta_t * m_inv;
                const double m = mr_matrix_container.GetLumpedMass() [i_node];
                const double& d = mdiag_stiffness[i_node];

                //correct fractional momentum
                // dt/(m + dt*d): lumped mass plus a diagonal-stiffness contribution in the denominator
                for (unsigned int comp = 0; comp < TDim; comp++)
                {
                    U_i_curr[comp] += delta_t / (m + delta_t*d) * correction[comp];
                }
            }
        }

        // //imit acceleration
        // #pragma omp parallel for
        // for(int i_node = 0; i_node < n_nodes; i_node++)
        // {
        //     array_1d<double,TDim>& acc = macc[i_node];
        //     array_1d<double,TDim>& v1 = mvel_n1[i_node];
        //     array_1d<double,TDim>& v = mvel_n[i_node];
        //
        //     for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //         acc[l_comp] = (v1[l_comp] - v[l_comp])/delta_t;
        //
        //     //limit accelerations to a maximum=100m/s/2
        //     const double max_acc = 200;
        //     double acc_norm = norm_2(acc);
        //     if(acc_norm > max_acc)
        //     {
        //         std::cout << "########################### acc norm " << acc_norm <<std::endl;
        //
        //         acc *= max_acc/acc_norm;
        //         v1 = v;
        //         v1 += delta_t*acc;
        //     }
        // }

        ApplyVelocityBC (mvel_n1);

        //save acceleration
        #pragma omp parallel for
        for(int i_node = 0; i_node < n_nodes; i_node++)
        {
            array_1d<double,TDim>& acc = macc[i_node];
            array_1d<double,TDim>& v1 = mvel_n1[i_node];
            array_1d<double,TDim>& v = mvel_n[i_node];

            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                acc[l_comp] = (v1[l_comp] - v[l_comp])/delta_t;
        }

        //write velocity of time step n+1 to Kratos
        //calculate the error on the divergence
        if (muse_mass_correction == true)
        {
            #pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv)
            for (int i_node = 0; i_node < n_nodes; i_node++)
            {
                const double dist = mdistances[i_node];
                double& div_i_err = mdiv_error[i_node];
                div_i_err = 0.0;
                if (dist < 0.0) //node is inside domain ---- if outside do nothing
                {
                    const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];

                    //compute edge contributions dt*M^(-1)Gp
                    for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                    {
                        unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                        array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
                        CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
                        edge_ij.Add_D_v (div_i_err, U_i_curr*mRho, U_j_curr * mRho);
                    }
                }
            }
        }

#ifdef DEBUG_OUTPUT
        // Debug-only checksums of the end-of-step-3 state.
        KRATOS_WATCH("end of step 3")
        double aux=0.0;
        for (int i_node = 0; i_node < n_nodes; i_node++)
            aux += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
        KRATOS_WATCH(inner_prod(mPn1,mPn1));
        KRATOS_WATCH(aux);
#endif

        mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, rNodes);

        KRATOS_CATCH ("")
    }

    // Re-imposes the prescribed distance values (mDistanceValuesList) on the nodes
    // listed in mDistanceBoundaryList, overwriting whatever the convection produced.
    void ApplyDistanceBC()
    {
        KRATOS_TRY

        //slip condition
        int size = mDistanceBoundaryList.size();
        #pragma omp parallel for firstprivate(size)
        for (int i_dist = 0; i_dist < size; i_dist++)
        {
            unsigned int i_node = mDistanceBoundaryList[i_dist];
            double& dist = mdistances[i_node];
            dist = mDistanceValuesList[i_dist];
        }

        //fix the distance if velocity goes inwards
        // int slip_size = mSlipBoundaryList.size();
        // #pragma omp parallel for firstprivate(slip_size)
        // for (int i_slip = 0; i_slip < slip_size; i_slip++)
        // {
        //     unsigned int i_node = mSlipBoundaryList[i_slip];
        //     double dist = mphi_n[i_node];
        //     // if(dist > 0.0)
        //     // {
        //     array_1d<double, TDim>& U_i = mvel_n1[i_node];
        //     array_1d<double, TDim>& an_i = mSlipNormal[i_node];
        //     double projection_length = 0.0;
        //     double normalization = 0.0;
        //     for (unsigned int comp = 0; comp < TDim; comp++)
        //     {
        //         projection_length += U_i[comp] * an_i[comp];
        //     }
        //     if(projection_length > 0.0)
        //         dist = mphi_n[i_node];
        //     // }
        // }

        KRATOS_CATCH ("")
    }

    //************************************
    void ApplyVelocityBC (CalcVectorType&
VelArray)
    {
        KRATOS_TRY
        // Applies velocity boundary conditions to VelArray:
        //  1. corner nodes get the average velocity of their wet, non-slip neighbours;
        //  2. slip nodes have the component along mSlipNormal removed (free-slip);
        //  3. nodes with fixed velocities are overwritten with the prescribed values.
        // Only nodes with non-positive distance (inside the fluid) are touched in 2. and 3.

        // if(mWallLawIsActive == false)
        // {
        //     std::cout << "applying corners condition" << std::endl;
        //     apply conditions on corner edges
        //     int edge_size = medge_nodes_direction.size();
        //     #pragma omp parallel for firstprivate(edge_size)
        //     for (int i = 0; i < edge_size; i++)
        //     {
        //         int i_node = medge_nodes[i];
        //         const array_1d<double, TDim>& direction = medge_nodes_direction[i];
        //         double dist = mdistances[i_node];
        //
        //         if(dist <= 0.0)
        //         {
        //             array_1d<double, TDim>& U_i = VelArray[i_node];
        //             // for (unsigned int comp = 0; comp < TDim; comp++)
        //             //     U_i[comp] = 0.0;
        //
        //             double temp=0.0;
        //             for (unsigned int comp = 0; comp < TDim; comp++)
        //                 temp += U_i[comp] * direction[comp];
        //
        //             for (unsigned int comp = 0; comp < TDim; comp++)
        //                 U_i[comp] = direction[comp]*temp;
        //         }
        //     }
        //
        //
        //
        //     //apply conditions on corners
        //     int corner_size = mcorner_nodes.size();
        //     for (int i = 0; i < corner_size; i++)
        //     {
        //         int i_node = mcorner_nodes[i];
        //
        //         array_1d<double, TDim>& U_i = VelArray[i_node];
        //         for (unsigned int comp = 0; comp < TDim; comp++)
        //             U_i[comp] = 0.0;
        //     }
        //

        //apply conditions on corners
        // Corner nodes: replace the velocity with the average of the neighbours that
        // are inside the fluid (dist_j <= 0) and are NOT slip nodes; left untouched
        // if no such neighbour exists (counter == 0).
        int corner_size = mcorner_nodes.size();
        for (int i = 0; i < corner_size; i++)
        {
            int i_node = mcorner_nodes[i];

            array_1d<double, TDim>& U_i = VelArray[i_node];
            // if(mdistances[i_node] <= 0.0)
            // {
            array_1d<double, TDim> aux;
            for (unsigned int comp = 0; comp < TDim; comp++)
                aux[comp] = 0.0;
            double counter = 0.0;

            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                //get global index of neighbouring node j
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                const double& dist_j = mdistances[j_neighbour];
                array_1d<double, TDim>& vj = VelArray[j_neighbour];
                if(dist_j <= 0 && mis_slip[j_neighbour] == false)
                {
                    counter += 1.0;
                    for (unsigned int comp = 0; comp < TDim; comp++)
                        aux[comp] += vj[comp];
                }
            }
            if(counter != 0.0)
                for (unsigned int comp = 0; comp < TDim; comp++)
                    U_i[comp] = aux[comp]/counter;
            // }
        }
        // }

        //slip condition
        // Remove the velocity component along the (non-normalized) slip normal:
        // U_i -= (U_i . n / |n|^2) * n. CalculateNormals guarantees slip normals
        // are non-zero, so the division by normalization is safe for listed nodes.
        int slip_size = mSlipBoundaryList.size();
        #pragma omp parallel for firstprivate(slip_size)
        for (int i_slip = 0; i_slip < slip_size; i_slip++)
        {
            unsigned int i_node = mSlipBoundaryList[i_slip];
            double dist = mdistances[i_node];
            if (dist <= 0.0)
            {
                array_1d<double, TDim>& U_i = VelArray[i_node];
                array_1d<double, TDim>& an_i = mSlipNormal[i_node];
                double projection_length = 0.0;
                double normalization = 0.0;
                for (unsigned int comp = 0; comp < TDim; comp++)
                {
                    projection_length += U_i[comp] * an_i[comp];
                    normalization += an_i[comp] * an_i[comp];
                }
                projection_length /= normalization;
                //tangential momentum as difference between original and normal momentum
                for (unsigned int comp = 0; comp < TDim; comp++)
                    U_i[comp] -= projection_length * an_i[comp];
            }
        }

        // //loop over all faces
        // ValuesVectorType vel_correction(VelArray.size());
        //
        // CalcVectorType slip_area(VelArray.size());
        // int iterations = 10;
        // for(unsigned int i=0;i<iterations; i++)
        // {
        //     mr_matrix_container.SetToZero(vel_correction);
        //     // mr_matrix_container.SetToZero(slip_area);
        //     for (ModelPart::ConditionsContainerType::iterator cond_it = mr_model_part.ConditionsBegin(); cond_it != mr_model_part.ConditionsEnd(); cond_it++)
        //     {
        //         if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == true)
        //         {
        //             //get geometry data of the face
        //             Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
        //
        //             //reference for area normal of the face
        //             array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
        //             double n_area = norm_2(face_normal) / static_cast<double>(TDim);
        //
        //             unsigned int i_node0 = static_cast<unsigned int> (face_geometry[0].FastGetSolutionStepValue(AUX_INDEX));
        //             unsigned int i_node1 = static_cast<unsigned int> (face_geometry[1].FastGetSolutionStepValue(AUX_INDEX));
        //             unsigned int i_node2 = static_cast<unsigned int> (face_geometry[2].FastGetSolutionStepValue(AUX_INDEX));
        //
        //             const array_1d<double, TDim>& U_0 = VelArray[i_node0];
        //             const array_1d<double, TDim>& U_1 = VelArray[i_node1];
        //             const array_1d<double, TDim>& U_2 = VelArray[i_node2];
        //
        //             double vn0=0.0;
        //             double vn1=0.0;
        //             double vn2=0.0;
        //             if(mdistances[i_node0] <= 0 && face_geometry[0].IsFixed(VELOCITY_X) == false) vn0 = inner_prod(U_0,face_normal);
        //             if(mdistances[i_node1] <= 0 && face_geometry[1].IsFixed(VELOCITY_X) == false) vn1 = inner_prod(U_1,face_normal);
        //             if(mdistances[i_node2] <= 0 && face_geometry[2].IsFixed(VELOCITY_X) == false) vn2 = inner_prod(U_2,face_normal);
        //
        //             double edge01 = 0.5*(vn0+vn1)*0.333333333333333333333333333333*0.5;
        //             double edge02 = 0.5*(vn0+vn2)*0.333333333333333333333333333333*0.5;
        //             double edge12 = 0.5*(vn2+vn2)*0.333333333333333333333333333333*0.5;
        //
        //             vel_correction[i_node0] += edge01 + edge02;
        //             vel_correction[i_node1] += edge01 + edge12;
        //             vel_correction[i_node2] += edge02 + edge12;
        //
        //             /* double tmp = 0.333333333333333333333333333333333*0.333333333333333333333333333333333*(vn0+vn1+vn2);
        //             vel_correction[i_node0] += tmp;
        //             vel_correction[i_node1] += tmp;
        //             vel_correction[i_node2] += tmp; */
        //         }
        //     }
        //
        //     //slip condition
        //     int slip_size = mSlipBoundaryList.size();
        //     #pragma omp parallel for firstprivate(slip_size)
        //     for (int i_slip = 0; i_slip < slip_size; i_slip++)
        //     {
        //         unsigned int i_node = mSlipBoundaryList[i_slip];
        //         double dist = mdistances[i_node];
        //         if (dist <= 0.0)
        //         {
        //             array_1d<double, TDim>& U_i = VelArray[i_node];
        //             array_1d<double, TDim>& an_i = mSlipNormal[i_node];
        //             double normalization = 0.0;
        //             for (unsigned int comp = 0; comp < TDim; comp++)
        //             {
        //                 normalization += an_i[comp] * an_i[comp];
        //             }
        //             //tangential momentum as difference between original and normal momentum
        //             double coeff = vel_correction[i_node] / normalization;
        //             for (unsigned int comp = 0; comp < TDim; comp++)
        //                 U_i[comp] += coeff * an_i[comp];
        //         }
        //     }
        // }

        //fixed condition
        // Prescribed-velocity nodes: overwrite with the stored fixed values.
        int fixed_size = mFixedVelocities.size();
        #pragma omp parallel for firstprivate(fixed_size)
        for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
        {
            unsigned int i_node = mFixedVelocities[i_velocity];
            double dist = mdistances[i_node];
            if (dist <= 0.0)
            {
                const array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity];
                array_1d<double, TDim>& u_i = VelArray[i_node];
                for (unsigned int comp = 0; comp < TDim; comp++)
                    u_i[comp] = u_i_fix[comp];
            }
        }

        KRATOS_CATCH ("")
    }

    //********************************
    //function to compute coefficients
    //
    // Extrapolates velocity, pressure and pressure projection from the fluid
    // (negative-distance) region onto the first 'extrapolation_layers' layers of
    // outside nodes, layer by layer, averaging over already-known neighbours.
    void ExtrapolateValues (unsigned int extrapolation_layers)
    {
        KRATOS_TRY

        //ensure that corner nodes are wet if all of the nodes around them have a negative distance
        // typedef Node < 3 > PointType;
        // typedef PointerVector<PointType > PointVector;
        // typedef PointVector::iterator PointIterator;

        mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances,mr_model_part.Nodes() );

        #pragma omp parallel for
        for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
            mis_visited[i_node] = 0.0;

        // layers: flat list of node ids, partitioned per layer by layer_limits.
        boost::numeric::ublas::vector<int> layers(mr_model_part.Nodes().size(),-1);
        // std::vector<int> layer_color(mr_model_part.Nodes().size(),-1000);
        boost::numeric::ublas::vector<int> layer_limits(extrapolation_layers+1);

        //detect the nodes inside the fluid surface
        layer_limits[0] = 0;
        int layer_counter = -1;

        #pragma omp parallel for
        for (int i_node = 0; i_node < static_cast<int>( mr_model_part.Nodes().size()); i_node++)
        {
            if(mdistances[i_node] < 0.0) //candidates are only the ones inside the fluid domain
            {
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    //get global index of neighbouring node j
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    if(mdistances[j_neighbour] >= 0.0 && mis_visited[i_node] == 0)
                    {
                        #pragma omp critical
                        layers[++layer_counter] = i_node;
                        // NOTE(review): the critical section above guards only the single
                        // statement that follows it; the mis_visited write below runs
                        // outside the critical region — confirm this is intended.
                        mis_visited[i_node] = 1;
                        break;
                    }
                }
            }
            else
            {
                //set everything to zero on the outside nodes
                mvel_n1[i_node] = ZeroVector (TDim);
                mvel_n[i_node] = ZeroVector (TDim);
                mPn[i_node] = 0.0;
                mPn1[i_node] =
0.0;
                mXi[i_node] = ZeroVector (TDim);
            }
        }
        layer_limits[1] = layer_counter;
        // NOTE(review): entries were filled with layers[++layer_counter] = ..., so
        // layer_counter here is the index of the LAST filled entry, not the count —
        // layer_limits[1] looks one short of the true layer-0 size; verify upstream.

        //fill the following layers by neighbour relationships
        //each layer fills the following
        for (unsigned int il = 0; il < extrapolation_layers - 1; il++)
        {
            //parallelization not trivial
            for(unsigned int iii = static_cast<unsigned int>(layer_limits[il]); iii<static_cast<unsigned int>(layer_limits[il+1]); iii++)
            {
                unsigned int i_node = layers[iii];
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    if(mdistances[j_neighbour] >= 0.0 && mis_visited[j_neighbour] == 0)
                    {
                        layers[layer_counter++] = j_neighbour;
                        mis_visited[j_neighbour] = il+2;
                    }
                }
            }
            layer_limits[il+2] = layer_counter;
        }

        array_1d<double, TDim > aux, aux_proj;

        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        //double delta_t = CurrentProcessInfo[DELTA_TIME];

        //fill the pressure projection on the first layer inside the fluid
        //by extrapolating from the pressure projection on the layer -1 (the first layer completely inside the domain)
        #pragma omp parallel for
        for(int i=layer_limits[0]; i<layer_limits[1]; i++)
        {
            unsigned int i_node = layers[i];
            noalias (aux_proj) = ZeroVector (TDim);
            double avg_number = 0.0;

            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                if( mis_visited[j_neighbour] == 0)
                {
                    const array_1d<double, TDim > & inside_press_grad = mXi[j_neighbour];
                    noalias (aux_proj) += inside_press_grad;
                    avg_number += 1.0;
                }
            }

            if (avg_number != 0.0) //this case means that it has some neighbours that are completely internal
            {
                aux_proj /= avg_number;
                noalias (mXi[i_node] ) = aux_proj;
            }
            else //case in which there is not a layer of nodes completely internal
            {
                // fall back to the hydrostatic estimate rho*(g - acc)
                array_1d<double,TDim>& xi = mXi[i_node];
                noalias ( xi ) = mRho*mBodyForce;
                noalias ( xi ) -= mRho*macc[i_node];
            }
        }

        //perform extrapolation layer by layer by making an average
        //of the neighbours of lower order
        /* KRATOS_WATCH(extrapolation_layers)
           for (unsigned int il = 0; il < extrapolation_layers; il++)
               std::cout << layer_limits[il] << " ";
           std::cout << std::endl;
           std::cout << std::endl;
           for (unsigned int il = 0; il < extrapolation_layers; il++)
           {
               std::cout << "level = " << il << " nneighb = " << layer_limits[il+1] - layer_limits[il] << " -- ";
               for(unsigned int iii = layer_limits[il]; iii<layer_limits[il+1]; iii++)
                   std::cout << layers[iii] << " ";
               std::cout << std::endl;
           }
           std::cout << std::endl;
           std::cout << " printing is visited " << std::endl;
           for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++)
               std::cout << mis_visited[i_node] << std::endl;
           std::cout << std::endl;*/
        for (int il = 1; il < static_cast<int>(extrapolation_layers); il++)
        {
            //parallelization of this loop not trivial
            for(int iii = layer_limits[il]; iii<layer_limits[il+1]; iii++)
            {
                unsigned int i_node = layers[iii];

                noalias (aux) = ZeroVector (TDim);
                noalias (aux_proj) = ZeroVector (TDim);
                double avg_number = 0.0;
                double pavg = 0.0;

                // average velocity, pressure estimate and projection over all
                // neighbours belonging to a strictly lower (already known) layer
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    if (mis_visited[j_neighbour] < (il + 1) && mis_visited[j_neighbour] != 0)
                    {
                        const array_1d<double, TDim >& direction_vec = mEdgeDimensions[csr_index];
                        // noalias (direction_vec) -= coords_bottom;
                        const array_1d<double, TDim >& press_grad = mXi[j_neighbour]; //i->FastGetSolutionStepValue (PRESS_PROJ);
                        double temp = inner_prod (direction_vec, press_grad);
                        double pestimate = mPn[j_neighbour] + temp;
                        pavg += pestimate;
                        noalias (aux_proj) += press_grad;

                        noalias (aux) += mvel_n1[j_neighbour]; //i->FastGetSolutionStepValue (VELOCITY);

                        avg_number += 1.0;
                    }
                }

                if (avg_number != 0.0)
                {
                    aux /= avg_number;
                    pavg /= avg_number;
                    aux_proj /= avg_number;
                    // KRATOS_WATCH(avg_number);
                    // KRATOS_WATCH(aux);
                    // KRATOS_WATCH(pavg);
                    // KRATOS_WATCH(aux_proj);
                }
                else
                {
                    KRATOS_THROW_ERROR (std::runtime_error, "error in extrapolation:: no neighbours find on a extrapolation layer -- impossible", "");
                    // KRATOS_THROW_ERROR(std:logic_error,"error in extrapolation:: no neighbours find on a extrapolation layer -- impossible","");
                }

                mvel_n1[i_node] = aux;
                mvel_n[i_node] = aux;

                mPn[i_node] = pavg;
                // mPn1[i_node] = pavg;

                mXi[i_node] = aux_proj;
            }
        }

        //mark nodes on which we will have to solve for convection
        //mark all of internal nodes
        #pragma omp parallel for
        for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
        {
            if (mdistances[i_node] <= 0.0)
                mis_visited[i_node] = 1.0;
            else
                mis_visited[i_node] = 0.0;
        }

        //now mark all of the nodes up to the extrapolation layers - 1
        for (unsigned int il = 0; il < extrapolation_layers-1; il++)
        {
            #pragma omp parallel for
            for( int iii = static_cast<int>(layer_limits[il]); iii<static_cast<int>(layer_limits[il+1]); iii++)
            {
                unsigned int i_node = layers[iii];
                mis_visited[i_node] = 1.0;
            }
        }

        ApplyVelocityBC (mvel_n1);

        // mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() );
        // KRATOS_WATCH("end of Extrapolate Values ")
        // KRATOS_WATCH(mvel_n1)
        // KRATOS_WATCH(mPn)
        // KRATOS_WATCH(mPn1)
        // KRATOS_WATCH(mXi)
        // KRATOS_WATCH(mdistances)
#ifdef DEBUG_OUTPUT
        // Debug-only checksums of the extrapolated state.
        KRATOS_WATCH("end of extrapolate values - new")
        double aux_v=0.0;
        for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
            aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
        double aux_xi=0.0;
        for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
            aux_xi += inner_prod(mXi[i_node],mXi[i_node]);
        KRATOS_WATCH(inner_prod(mPn1,mPn1));
        KRATOS_WATCH(aux_v);
        KRATOS_WATCH(aux_xi);
#endif

        KRATOS_CATCH ("")
    }

    // Flips the sign of the nodal DISTANCE variable on every node of the model part.
    void ChangeSignToDistance()
    {
        KRATOS_TRY

        for
(ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
                inode != mr_model_part.NodesEnd();
                inode++)
        {
            double dist = inode->FastGetSolutionStepValue (DISTANCE);
            inode->FastGetSolutionStepValue (DISTANCE) = -dist;
        }

        KRATOS_CATCH ("")
    }

    // Sets mis_visited to 1 for nodes whose distance lies strictly inside (min, max),
    // and to 0 for every other node.
    void MarkNodesByDistance (double min, double max)
    {
        KRATOS_TRY

        #pragma omp parallel for
        for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
        {
            double& dist = mdistances[i_node];
            if ( dist > min && dist < max )
                mis_visited[i_node] = 1.0;
            else
                mis_visited[i_node] = 0.0;
        }

        KRATOS_CATCH ("")
    }

    // Copies the current value of rVar into its previous-time-step (history index 1)
    // slot on every node.
    void SaveScalarVariableToOldStep (Variable<double>& rVar)
    {
        KRATOS_TRY

        for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
                inode != mr_model_part.NodesEnd();
                inode++)
        {
            inode->FastGetSolutionStepValue (rVar, 1) = inode->FastGetSolutionStepValue (rVar);
        }

        KRATOS_CATCH ("")
    }

    // Marks (mis_visited = 1) every node with positive distance and all of its edge
    // neighbours; every other node is reset to 0.
    void MarkExternalAndMixedNodes()
    {
        KRATOS_TRY

        #pragma omp parallel for
        for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
            mis_visited[i_node] = 0;

        for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++)
        {
            if(mdistances[i_node] > 0.0)
            {
                mis_visited[i_node] = 1;
                //loop over neighbours and mark them as well
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    mis_visited[j_neighbour] = 1;
                }
            }
        }
        KRATOS_CATCH ("")
    }

    // Marks (mis_visited = 1) every node with non-positive distance and all of its
    // edge neighbours; every other node is reset to 0.
    void MarkInternalAndMixedNodes()
    {
        KRATOS_TRY

        #pragma omp parallel for
        for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
            mis_visited[i_node] = 0;

        for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++)
        {
            if(mdistances[i_node] <= 0.0)
            {
                mis_visited[i_node] = 1;
                //loop over neighbours and mark them as well
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    mis_visited[j_neighbour] = 1;
                }
            }
        }
        KRATOS_CATCH ("")
    }

    // Marks (mis_visited = 1) exactly the nodes with non-positive distance.
    void MarkInternalNodes()
    {
        KRATOS_TRY

        #pragma omp parallel for
        for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
        {
            if(mdistances[i_node] <= 0.0)
                mis_visited[i_node] = 1;
            else
                mis_visited[i_node] = 0;
        }

        KRATOS_CATCH ("")
    }

    //**************************************
    //function to calculate the area normals
    //
    // Computes face area normals (via CalculateNormal2D/3D, stored in the NORMAL
    // condition value) and accumulates node_factor-weighted face normals into
    // mSlipNormal for nodes of IS_STRUCTURE faces, and into mInOutNormal for nodes
    // of inlet/outlet faces; also (re)builds mSlipBoundaryList, mInOutBoundaryList
    // and the mis_slip flags.
    void CalculateNormals (ModelPart::ConditionsContainerType& rConditions)
    {
        KRATOS_TRY

        //calculate area normals face-by-face
        array_1d<double, 3 > area_normal;
        //2D case
        if (TDim == 2)
        {
            for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
                CalculateNormal2D (cond_it, area_normal);
        }//3D case
        else if (TDim == 3)
        {
            //help vectors for cross product
            array_1d<double, 3 > v1;
            array_1d<double, 3 > v2;
            for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
                CalculateNormal3D (cond_it, area_normal, v1, v2);
        }

        // area_normal *= -1; //CHAPUZA: REMOVE!!!s

        //(re)initialize normals
        unsigned int n_nodes = mNodalFlag.size();
        mInOutNormal.resize (n_nodes);
        mSlipNormal.resize (n_nodes);
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            noalias (mSlipNormal[i_node]) = ZeroVector (TDim);
            mis_slip[i_node] = false;
            noalias (mInOutNormal[i_node]) = ZeroVector (TDim);
        }

        //loop over all faces
        // each face distributes 1/TDim of its normal to each of its TDim nodes
        const double node_factor = 1.0 / TDim;
        for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
        {
            //get geometry data of the face
            Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();

            //reference for area normal of the face
            array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL);

            //slip condition
            if (static_cast<bool> (cond_it->GetValue (IS_STRUCTURE) ) == true)
                for (unsigned int if_node = 0; if_node < TDim; if_node++)
                {
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue (AUX_INDEX) );
                    array_1d<double, TDim>& slip_normal = mSlipNormal[i_node];
                    mis_slip[i_node] = true;
                    for (unsigned int comp = 0; comp < TDim; comp++)
                    {
                        slip_normal[comp] += node_factor * face_normal[comp];
                    }
                }
        }

        //fill the list of slip nodes
        std::vector< unsigned int> tempmSlipBoundaryList;
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            if (mis_slip[i_node] == true)
                tempmSlipBoundaryList.push_back (i_node);
            mis_slip[i_node] = false;   // reset; mis_slip is reused below for inlet/outlet detection
        }
        mSlipBoundaryList.resize (tempmSlipBoundaryList.size(),false);

        #pragma omp parallel for
        for (int i=0; i<static_cast<int> (tempmSlipBoundaryList.size() ); i++)
            mSlipBoundaryList[i] = tempmSlipBoundaryList[i];

        //check that all of the normals are not zero
        for (int i=0; i<static_cast<int> (mSlipBoundaryList.size() ); i++)
        {
            unsigned int i_node = mSlipBoundaryList[i];
            double tmp = norm_2(mSlipNormal[i_node]);
            if(tmp < 1e-20)
                KRATOS_THROW_ERROR(std::logic_error,"found a slip node with zero normal on node with id",i_node+1)
            }

        //loop over all faces to fill inlet outlet
        for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
        {
            //get geometry data of the face
            Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();

            //reference for area normal of the face
            array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL);

            // a face is inlet/outlet when it is not a structure face, or when any of
            // its nodes has a fixed VELOCITY_X
            bool is_inlet_or_outlet = false;
            if (cond_it->GetValue (IS_STRUCTURE) == 0)
                is_inlet_or_outlet = true;
            else
            {
                for (unsigned int if_node = 0; if_node < TDim; if_node++)
                    if (face_geometry[if_node].IsFixed (VELOCITY_X) )
                        is_inlet_or_outlet = true;
            }
            //slip condition
            if (is_inlet_or_outlet) //the opposite of the loop before
                for (unsigned int if_node = 0; if_node < TDim; if_node++)
                {
                    unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue (AUX_INDEX) );
                    array_1d<double, TDim>& inout_normal = mInOutNormal[i_node];
                    mis_slip[i_node] = true; //reutilize it!
                    for (unsigned int comp = 0; comp < TDim; comp++)
                    {
                        inout_normal[comp] += node_factor * face_normal[comp];
                    }
                }
        }

        // KRATOS_WATCH( mInOutNormal[7-1] );
        // KRATOS_THROW_ERROR(std::logic_error,"remove line 2318 " ,"");

        std::vector< unsigned int> tempmInOutBoundaryList;
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            if (mis_slip[i_node] == true)
                tempmInOutBoundaryList.push_back (i_node);
        }
        mInOutBoundaryList.resize (tempmInOutBoundaryList.size(),false);

        #pragma omp parallel for
        for (int i=0; i<static_cast<int> (tempmInOutBoundaryList.size() ); i++)
            mInOutBoundaryList[i] = tempmInOutBoundaryList[i];

        //store for future use the list of slip nodes
        #pragma omp parallel for
        for (int i=0; i<static_cast<int> (mis_slip.size() ); i++)
            mis_slip[ i ] = false;

        #pragma omp parallel for
        for (int i=0; i<static_cast<int> (mSlipBoundaryList.size() ); i++)
            mis_slip[ mSlipBoundaryList[i] ] = true;

        KRATOS_CATCH ("")
    }

    //*******************************
    //function to free dynamic memory
    //
    // Releases all internal work arrays of the solver.
    void Clear()
    {
        KRATOS_TRY
        mViscosity.clear();
        mWork.clear();
        mvel_n.clear();
        mvel_n1.clear();
        mPn.clear();
        mPn1.clear();
        mHmin.clear();
        mHavg.clear();
        mSlipNormal.clear();
        mNodalFlag.clear();
        mFixedVelocities.clear();
        mFixedVelocitiesValues.clear();
        mPressureOutletList.clear();
        // mPressureOutlet.clear();
        mSlipBoundaryList.clear();
        mL.clear();
        mTauPressure.clear();
        mTauConvection.clear();
        mTau2.clear();
        mBeta.clear();
        mPiConvection.clear();
        mphi_n.clear();
        mphi_n1.clear();

        mEps.clear();
        // mD.clear();
        mA.clear();
        mB.clear();

        mdiv_error.clear();
        mWallReductionFactor.clear();
        mdiag_stiffness.clear();
        mis_slip.clear();
        mis_visited.clear();
        macc.clear();

        KRATOS_CATCH ("")
    }

    void ConvectDistance()
    {
        KRATOS_TRY

        //variables for node based data handling
        ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
        int n_nodes = rNodes.size();

        //storage of nodal values in local variables
        ValuesVectorType rhs, WorkConvection;
        rhs.resize (n_nodes);
WorkConvection.resize (n_nodes); ValuesVectorType active_nodes; active_nodes.resize (n_nodes); // mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); //read variables from Kratos // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); // mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (DISTANCE, mphi_n1, mr_model_part.Nodes() ); mr_matrix_container.FillOldScalarFromDatabase (DISTANCE, mphi_n, mr_model_part.Nodes() ); //get the "fresh" values to be fixed_size for (unsigned int i=0; i< mDistanceValuesList.size(); i++) { mDistanceValuesList[ i ] = mphi_n1[ mDistanceBoundaryList[i] ]; } //mr_matrix_container.AssignVectorToVector(mphi_n1, mphi_n); //mWork = mphi_n // //chapuza // //set the distance to zero when it tries to go out of the pressure boundary // int pressure_size = mPressureOutletList.size(); // #pragma omp parallel for firstprivate(pressure_size) // for (int iii = 0; iii < pressure_size; iii++) // { // unsigned int i_node = mPressureOutletList[iii]; // mphi_n1[i_node] = fabs(mphi_n1[i_node]); // mphi_n[i_node] = fabs(mphi_n[i_node]); // } //create and fill a vector of nodes for which we want to convect the velocity for (int i_node = 0; i_node < n_nodes; i_node++) { active_nodes[i_node] = mis_visited[i_node]; } // ComputeConvectiveProjection(mPiConvection,mphi_n1,mEps,mvel_n1); // ComputeLimitor(mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); // mr_matrix_container.WriteScalarToDatabase(TEMPERATURE, active_nodes, rNodes); //read time step size from Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; double n_substeps = mnumsubsteps; // del double delta_t_substep = delta_t/n_substeps; for (unsigned int substep = 0; substep<n_substeps; substep++) { mr_matrix_container.AssignVectorToVector (mphi_n, WorkConvection); //mWork = mphi_n 
//first step of Runge Kutta // mr_matrix_container.AssignVectorToVector(mphi_n,mphi_n1); //mphi_n1 = mphi_n mr_matrix_container.SetToZero (rhs); ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1); ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes); mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 6.0, mr_matrix_container.GetInvertedMass(), rhs); mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, 0.5 * delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs); ApplyDistanceBC(); //second step mr_matrix_container.SetToZero (rhs); ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1); ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes); mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 3.0, mr_matrix_container.GetInvertedMass(), rhs); mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, 0.5 * delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs); ApplyDistanceBC(); //third step mr_matrix_container.SetToZero (rhs); ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1); ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes); mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 3.0, mr_matrix_container.GetInvertedMass(), rhs); mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs); ApplyDistanceBC(); //fourth step mr_matrix_container.SetToZero (rhs); ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1); ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes); mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep 
/ 6.0, mr_matrix_container.GetInvertedMass(), rhs);
        ApplyDistanceBC();

        //compute right-hand side
        // commit the RK4 combination as the new phi and roll it into phi_n for the next substep
        mr_matrix_container.AssignVectorToVector (WorkConvection, mphi_n1);
        mr_matrix_container.AssignVectorToVector (mphi_n1, mphi_n);
    }

    // // make sure that boundary nodes that are very close to the free surface get wet
    // int slip_size = mSlipBoundaryList.size();
    // #pragma omp parallel for firstprivate(slip_size)
    // for (int i_slip = 0; i_slip < slip_size; i_slip++) {
    //     unsigned int i_node = mSlipBoundaryList[i_slip];
    //     const double& h_i = mHmin[i_node];
    //     double& dist_i = mphi_n1[i_node];
    //
    //     if(dist_i > 0.0 && dist_i < 0.5*h_i)
    //     {
    //         //loop to all the edges surrounding node I
    //         for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
    //         {
    //             unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
    //             if(mphi_n1[j_neighbour] <= 0.0)
    //                 dist_i = -0.01 * h_i;
    //         }
    //     }
    //
    // }
    // int fixed_size = mFixedVelocities.size();
    // #pragma omp parallel for firstprivate(fixed_size)
    // for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++) {
    //     unsigned int i_node = mFixedVelocities[i_velocity];
    //     const double& h_i = mHmin[i_node];
    //     double& dist_i = mphi_n1[i_node];
    //
    //     if(dist_i > 0.0 && dist_i < 0.5*h_i)
    //     {
    //         //loop to all the edges surrounding node I
    //         for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
    //         {
    //             unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
    //             if(mphi_n1[j_neighbour] <= 0.0)
    //                 dist_i = -0.01 * h_i;
    //         }
    //     }
    // }

    //wetten corner nodes if needed
    // a corner node surrounded only by wet neighbours is forced wet itself
    int corner_size = mcorner_nodes.size();
    for (int i = 0; i < corner_size; i++)
    {
        int i_node = mcorner_nodes[i];
        bool to_be_wettened = true;
        double min_dist = 0.0;
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index
!= mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
            double neighb_dist = mphi_n1[j_neighbour];
            if (min_dist > neighb_dist)
                min_dist = neighb_dist;
            if (neighb_dist >= 0.0)
            {
                to_be_wettened=false;
            }
        }
        if (to_be_wettened==true)
            mphi_n1[i_node] = min_dist;
    }

    mr_matrix_container.WriteScalarToDatabase (DISTANCE, mphi_n1, mr_model_part.Nodes() );

    KRATOS_CATCH ("")
}

// Rolls the nodal database back one step and restarts the current step at
// the reduced time NewTime.
void ReduceTimeStep (ModelPart& rModelPart, double NewTime)
{
    KRATOS_TRY
    /*
    double current_time = rModelPart.GetProcessInfo()[TIME];
    double current_delta_time = rModelPart.GetProcessInfo()[DELTA_TIME];
    double old_time = current_time - current_delta_time;
    double new_reduced_time = NewTime;
    double new_delta_time = new_reduced_time - old_time;
    rModelPart.GetProcessInfo()[TIME] = new_reduced_time;
    rModelPart.GetProcessInfo()[DELTA_TIME] = new_delta_time;

    //now copy the database from the old step on the top of the current step
    int step_data_size = ThisModelPart.GetNodalSolutionStepDataSize();
    double* current_data = (pnode)->SolutionStepData().Data(0);
    double* old_data = (pnode)->SolutionStepData().Data(1);
    for (int j = 0; j < step_data_size; j++)
        current_data[j] = old_data[j];
    */
    rModelPart.OverwriteSolutionStepData (1, 0);
    rModelPart.GetProcessInfo().SetCurrentTime (NewTime);
    KRATOS_CATCH ("error in reducing the time step")
}

// Returns false when the convected distance field develops gradients with
// norm > 1.5 inside the fluid, which indicates a failed convection step.
bool CheckDistanceConvection()
{
    int n_large_distance_gradient = 0;
    array_1d<double, TDim> grad_d;

    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();

    //calculate gradient of distance on the nodes and count occurrences of large gradients (that indicate a failure)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double dist = mdistances[i_node];
        if (dist <= 0.0)
        {
            for (unsigned int comp = 0; comp < TDim; comp++)
                grad_d[comp] = 0.0;
            double dist_i = mdistances[i_node];
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index !=
mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                //get global index of neighbouring node j
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                const double& dist_j = mdistances[j_neighbour];
                //projection of pressure gradients
                // (the edge "grad p" operator is reused here to accumulate grad(distance))
                CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
                edge_ij.Add_grad_p (grad_d, dist_i, dist_j);
            }
            const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node];
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                grad_d[l_comp] *= m_inv;
            double norm_grad = norm_2 (grad_d);
            if (norm_grad > 1.5) //large gradient found
                n_large_distance_gradient += 1;
        }
    }

    if (n_large_distance_gradient != 0)
    {
        bool success = false;
        return success;
    }
    else
    {
        bool success = true;
        return success;
    }
}

// Activates the wall-law treatment with wall distance Ywall and initializes
// the per-node wall reduction factors (currently all forced to 1.0; the
// normal-angle based weighting below is disabled).
void ActivateWallResistance (double Ywall)
{
    mWallLawIsActive = true;
    mY_wall = Ywall;

    double max_angle_overall = 0.0;
    //compute wall reduction factor
    //slip condition
    int slip_size = mSlipBoundaryList.size();
    #pragma omp parallel for firstprivate(slip_size)
    for (int i_slip = 0; i_slip < slip_size; i_slip++)
    {
        unsigned int i_node = mSlipBoundaryList[i_slip];
        /* const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
        double AI = norm_2(an_i);
        array_1d<double,TDim> nI = an_i/AI;

        double min_dot_prod = 1.0;
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            const array_1d<double, TDim>& an_j = mSlipNormal[j_neighbour];
            double AJ = norm_2(an_j);
            if(AJ > 1e-20) //...a slip node!
{ double tmp = 0.0;
                for (unsigned int comp = 0; comp < TDim; comp++)
                    tmp += nI[comp] * an_j[comp];
                tmp /= AJ;
                tmp = fabs(tmp);
                if(tmp < min_dot_prod) min_dot_prod = tmp;
            }
        }
        double max_angle = acos(min_dot_prod);
        // max_angle *= 2.0;
        // if(max_angle > 3.1415926*0.5) max_angle = 3.1415926*0.5;
        if(max_angle > max_angle_overall) max_angle_overall = max_angle;*/
        mWallReductionFactor[i_node] = 1.0; //sin(max_angle) + 0.1; // pow(sin(max_angle),6) * 10.0 /** 100.0*/ ;
    }
    std::cout << "max angle between normals found in the model = " << max_angle_overall << std::endl;
    // mr_matrix_container.WriteScalarToDatabase(YOUNG_MODULUS, mWallReductionFactor, mr_model_part.Nodes());

    //slip condition
    // int slip_size = mSlipBoundaryList.size();
    // #pragma omp parallel for firstprivate(slip_size)
    // for (int i_slip = 0; i_slip < slip_size; i_slip++)
    // {
    //     unsigned int i_node = mSlipBoundaryList[i_slip];
    //     double h = mHavg[i_node];
    //     if(mY_wall < h)
    //         mWallReductionFactor[i_node] = mY_wall/h;
    // }
    //
    // edge nodes get a stronger wall factor (medge_coefficient, typically 10)
    int edge_size = medge_nodes.size();
    #pragma omp parallel for firstprivate(edge_size)
    for (int i = 0; i < edge_size; i++)
    {
        int i_node = medge_nodes[i];
        mWallReductionFactor[i_node] = medge_coefficient; //10.0;
    }
    //
    //
    //apply conditions on corners
    int corner_size = mcorner_nodes.size();
    for (int i = 0; i < corner_size; i++)
    {
        int i_node = mcorner_nodes[i];
        mWallReductionFactor[i_node] = mcorner_coefficient; //50.0;
    }
}

// Activates the classical wall law: every node gets a unit reduction factor.
void ActivateClassicalWallResistance (double Ywall)
{
    mWallLawIsActive = true;
    mY_wall = Ywall;
    for (unsigned int i = 0; i < mWallReductionFactor.size(); i++)
        mWallReductionFactor[i] = 1.0 ;
}

// Net volume flux through the wetted inlet/outlet nodes during the current
// time step (returned with the sign convention: positive = net inflow).
double ComputeVolumeVariation()
{
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double dt = CurrentProcessInfo[DELTA_TIME];

    //slip condition
    int inout_size = mInOutBoundaryList.size();
    double vol_var = 0.0;
    //#pragma omp parallel for firstprivate(slip_size)
    for (int i = 0; i < inout_size; i++)
    {
        unsigned int i_node = mInOutBoundaryList[i];
        double dist =
mdistances[i_node];
        if (dist <= 0.0) // only wetted in/out nodes contribute
        {
            const array_1d<double, TDim>& U_i = mvel_n1[i_node];
            const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
            double projection_length = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
            {
                projection_length += U_i[comp] * an_i[comp];
            }
            vol_var += projection_length;
        }
    }
    return -vol_var * dt;
}

// Total fluid (wet) volume: sum of lumped mass / porosity over the nodes
// with non-positive distance.
double ComputeWetVolume()
{
    KRATOS_TRY

    mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );

    //slip condition
    double wet_volume = 0.0;
    //#pragma omp parallel for firstprivate(slip_size)
    for (int i = 0; i < static_cast<int> (mdistances.size() ); i++)
    {
        double dist = mdistances[i];
        const double m = mr_matrix_container.GetLumpedMass() [i];
        double porosity = mEps[i];
        if (dist <= 0.0)
        {
            wet_volume += m/porosity;
        }
    }
    return wet_volume;

    KRATOS_CATCH ("");
}

// Total mesh volume (wet + dry): sum of lumped mass / porosity on all nodes.
double ComputeTotalVolume()
{
    KRATOS_TRY

    mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );

    //slip condition
    double volume = 0.0;
    //#pragma omp parallel for firstprivate(slip_size)
    for (int i = 0; i < static_cast<int> (mdistances.size() ); i++)
    {
        const double m = mr_matrix_container.GetLumpedMass() [i];
        double porosity = mEps[i];
        volume += m/porosity;
    }
    return volume;

    KRATOS_CATCH ("");
}

// Discrete mass-conservation fix: while the measured wet volume is below the
// expected one, wets first-layer dry nodes one by one as long as each nodal
// volume still fits into the remaining volume deficit.
void DiscreteVolumeCorrection (double expected_volume, double measured_volume)
{
    double volume_error = expected_volume - measured_volume;

    if (measured_volume < expected_volume)
    {
        double layer_volume = 0.0;
        std::vector<unsigned int> first_outside;
        int n_nodes = mdistances.size();

        // find list of the first nodes outside of the fluid and compute their volume
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double dist = mdistances[i_node];
            if (dist > 0.0) //node is outside domain
            {
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                    if
(mdistances[j_neighbour] <= 0.0)
                    {
                        // node i touches the fluid: it is in the first dry layer
                        const double nodal_mass = 1.0 / mr_matrix_container.GetInvertedMass() [i_node];
                        if (nodal_mass < volume_error - layer_volume)
                        {
                            first_outside.push_back (i_node);
                            layer_volume += nodal_mass;
                            break;
                        }
                        //const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
                        //layer_volume += 1.0/m_inv;
                    }
                }
            }
        }
        // std::cout << ", layer_volume: " << layer_volume << std::endl;
        // if (measured_volume + layer_volume <= expected_volume)
        {
            // mark the nodes in the outside layer with a small negative distance
            for (unsigned int i=0; i<first_outside.size(); i++)
            {
                unsigned int i_node = first_outside[i];
                mdistances[i_node] = -mHavg[i_node];
            }
        }
    }
    mr_matrix_container.WriteScalarToDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );

    //if (measured_volume < expected_volume)
    // {
    //     double layer_volume = 0.0;
    //     std::vector<unsigned int> first_outside;
    //     int n_nodes = mdistances.size();
    //
    //     //find list of the first nodes outside of the fluid and compute their volume
    //     for (int i_node = 0; i_node < n_nodes; i_node++)
    //     {
    //         double dist = mdistances[i_node];
    //         if (dist > 0.0) //node is outside domain
    //         {
    //             for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
    //             {
    //                 unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
    //                 if(mdistances[j_neighbour] <= 0.0)
    //                 {
    //                     first_outside.push_back(i_node);
    //                     const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
    //                     layer_volume += 1.0/m_inv;
    //                 }
    //             }
    //         }
    //     }
    //     if (measured_volume + layer_volume <= expected_volume)
    //     {
    //         //mark the nodes in the outside layer with a small negative distance
    //         for(unsigned int i=0; i<first_outside.size(); i++)
    //         {
    //             unsigned int i_node = first_outside[i];
    //             mdistances[i_node] = -mHavg[i_node];
    //         }
    //     }
    // }
    // mr_matrix_container.WriteScalarToDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
}

// Sets the wall-reduction multipliers applied to corner and edge nodes.
void
SetWallReductionCoefficients (double corner_coefficient, double edge_coefficient)
{
    mcorner_coefficient = corner_coefficient;
    medge_coefficient = edge_coefficient;
}

// Continuous mass-conservation fix: lowers the whole distance field by a
// fraction of the average first-layer nodal size, proportional to the
// relative volume error (skipped when the error is below 10% of the layer).
void ContinuousVolumeCorrection (double expected_volume, double measured_volume)
{
    double volume_error = expected_volume - measured_volume;
    if (volume_error == 0.0)
        return ;

    if (measured_volume < expected_volume)
    {
        double layer_volume = 0.0;
        std::vector<unsigned int> first_outside;
        int n_nodes = mdistances.size();

        // find list of the first nodes outside of the fluid and compute their volume
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double dist = mdistances[i_node];
            bool is_bubble = true;
            bool is_first_outside = false;
            if (dist > 0.0) //node is outside domain
            {
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                    if (mdistances[j_neighbour] <= 0.0)
                    {
                        is_first_outside = true;
                    }
                    else
                        is_bubble = false;
                }
            }
            // nodes of tiny isolated air pockets (bubbles) are excluded
            if (is_first_outside && !is_bubble)
            {
                const double nodal_mass = 1.0 / mr_matrix_container.GetInvertedMass() [i_node];
                first_outside.push_back (i_node);
                layer_volume += nodal_mass;
                // if(nodal_mass > volume_error - layer_volume)
                // {
                //     extra_volume += nodal_mass;
                // }
            }
        }
        // std::cout << ", layer_volume: " << layer_volume << std::endl;
        if (layer_volume == 0.00)
            return;
        double ratio = volume_error / layer_volume;

        if (ratio > 1.0)
            ratio = 1.0;
        // KRATOS_WATCH (ratio);
        if (ratio < 0.1) // NO correction for less than 10% error
            return;

        double average_layer_h = 0.0;
        for (unsigned int i=0; i<first_outside.size(); i++)
        {
            unsigned int i_node = first_outside[i];
            average_layer_h += mHavg[i_node];
        }
        average_layer_h /= static_cast<double> (first_outside.size() );

        for (int i_node = 0; i_node < n_nodes; i_node++)
            mdistances[i_node] -= average_layer_h* ratio;

        // if((ratio < 1.00))
        // {
        //     // mark the nodes in the outside layer with a small negative distance
        //     for(unsigned int i=0; i<first_outside.size(); i++)
        //     {
        //         unsigned int i_node = first_outside[i];
        //         mdistances[i_node] -= mHavg[i_node] * ratio;
        //     }
        // }
        // else
        // {
        //     // mark the nodes in the outside layer with a small negative distance
        //     for(unsigned int i=0; i<first_outside.size(); i++)
        //     {
        //         unsigned int i_node = first_outside[i];
        //         mdistances[i_node] = -mHavg[i_node];
        //     }
        // }
    }
    mr_matrix_container.WriteScalarToDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
    return;
}

// NOTE(review): the two functions below are disabled development code
// (bubble detection / cold-shot detection); kept for reference.
// void FindBubbles()
// {
//     int n_nodes = mdistances.size();
//     ValuesVectorType last_air (n_nodes);
//     mr_matrix_container.SetToZero (last_air);
//     mr_matrix_container.FillScalarFromDatabase (LAST_AIR, last_air, mr_model_part.Nodes() );
//     const int max_bubble_nodes = 12;
//     const int min_bubble_nodes = 2;
//     #pragma omp parallel for
//     for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
//         mis_visited[i_node] = 0;
//
//     // loop over the nodes to find a outside node.
//     for (int i_node = 0; i_node < n_nodes; i_node++)
//     {
//         double dist = mdistances[i_node];
//         if ( (mis_visited[i_node] == 0) && (dist > 0.0) ) // node is outside the domain and has not visited yet
//         {
//             std::vector<int> outside_nodes (n_nodes,0);
//             outside_nodes[0] = i_node;
//             mis_visited[i_node] = 1;
//             int n_outside = 1;
//             for (int i = 0 ; i < n_outside ; i++) // loop over found outside nodes. NOTE: n_outside is increasing inside the loop
//             {
//                 int this_node = outside_nodes[i];
//                 // loop over neighbours of this node
//                 for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [this_node]; csr_index != mr_matrix_container.GetRowStartIndex() [this_node + 1]; csr_index++)
//                 {
//                     unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
//                     if ( (mis_visited[j_neighbour] == 0) && (mdistances[j_neighbour] >= 0.0) ) // the neighbour node is outside the fluid and not visited yet
//                     {
//                         outside_nodes[n_outside] = j_neighbour;
//                         n_outside++;
//                     }
//                     mis_visited[j_neighbour] = 1;
//                 }
//             }
//             //KRATOS_WATCH(i_node);
//             //KRATOS_WATCH(n_outside);
//             //KRATOS_WATCH(is_first_outside);
//             if ( (n_outside <= max_bubble_nodes) && (n_outside >= min_bubble_nodes) )
//             {
//                 //KRATOS_WATCH(i_node);
//                 //KRATOS_WATCH(n_outside);
//                 for (int i = 0 ; i < n_outside ; i++)
//                     last_air[outside_nodes[i]] = 1.00;
//             }
//         }
//     }
//     mr_matrix_container.WriteScalarToDatabase (LAST_AIR, last_air, mr_model_part.Nodes() );
// }
//
// void FindColdShots()
// {
//     int n_nodes = mdistances.size();
//     ValuesVectorType cold_shots(n_nodes);
//
//     mr_matrix_container.SetToZero(cold_shots);
//
//     mr_matrix_container.FillScalarFromDatabase(LAST_AIR, cold_shots, mr_model_part.Nodes());
//
//     std::vector<bool> is_first_outside(n_nodes, 0);
//
//     std::vector<unsigned int> first_outside;
//
//     // find list of the first nodes outside of the fluid
//     for (int i_node = 0; i_node < n_nodes; i_node++)
//     {
//         double dist = mdistances[i_node];
//         if (dist > 0.0) //node is outside domain
//         {
//             for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
//             {
//                 unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//                 if(mdistances[j_neighbour] <= 0.0)
//                 {
//                     is_first_outside[i_node] = true;
//                     first_outside.push_back(i_node);
//                     break;
//                 }
//             }
//         }
//     }
// NOTE(review): continuation of the disabled FindColdShots development code.
//
//     std::vector<bool> is_cold_shot(is_first_outside);
//
//     // Now we check if all the neighbours of the first_outside nodes are first outside or inside and mark it as a possible cold shot
//     for(unsigned int i=0; i<first_outside.size(); i++)
//     {
//         unsigned int i_node = first_outside[i];
//         for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
//         {
//             unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//             if(!is_first_outside[j_neighbour])
//             {
//                 is_cold_shot[i_node] = false;
//                 break;
//             }
//         }
//     }
//
//
//     //Now we have the possible cold shots and is time to check the gradient of convection
//     for(unsigned int i=0; i<first_outside.size(); i++)
//     {
//         unsigned int i_node = first_outside[i];
//         if(is_cold_shot[i_node])
//         {
//             for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
//             {
//                 unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//                 if(mdistances[j_neighbour] <= 0.0)
//                 {
//
//                 }
//             }
//         }
//     }
//
//
//
//     // Adding the found cold shots to the previous ones.
// for(int i_node = 0; i_node < n_nodes; i_node++) // if(is_cold_shot[i_node]) // cold_shots[i_node]=1.00; // // mr_matrix_container.WriteScalarToDatabase(LAST_AIR, cold_shots, mr_model_part.Nodes()); // } void CalculatePorousResistanceLaw(unsigned int res_law) { //variables for node based data handling // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); // mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes()); // const double nu_i = mViscosity; if (res_law == 1) { // KRATOS_WATCH("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Calculating Ergun Darcy coefficients ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") /* if the chosen resistance law is ERGUN calculate Ergun A and B*/ for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { const double eps = inode->FastGetSolutionStepValue (POROSITY); // KRATOS_WATCH("POROSITY ") // KRATOS_WATCH(eps) const double d = inode->FastGetSolutionStepValue (DIAMETER); // KRATOS_WATCH("DIAMETER ") // KRATOS_WATCH(d) // // KRATOS_WATCH("VISCOSITY ") // KRATOS_WATCH(mViscosity) double& a = inode-> FastGetSolutionStepValue (LIN_DARCY_COEF); double& b = inode-> FastGetSolutionStepValue (NONLIN_DARCY_COEF); if (eps < 1.0) { double k_inv = 150.0 * (1.0 - eps) * (1.0 - eps) / (eps * eps * eps * d * d); a = mViscosity * k_inv; b = (1.75 / eps) * sqrt (k_inv / (150.0 * eps) ); // KRATOS_WATCH("PERMEABILITY ") // KRATOS_WATCH(k_inv) // KRATOS_WATCH("LIN DARCY COEFFICIENT ") // KRATOS_WATCH(a) // KRATOS_WATCH("NONLIN DARCY COEFFICIENT ") // KRATOS_WATCH(b) } else { a = 0; b = 0; } } } else { /* whether it is a Custom Resistance law or NO resistance law is present ---> set to zero A and B for non 
porous nodes*/
        for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
                inode != mr_model_part.NodesEnd();
                inode++)
        {
            const double eps = inode->FastGetSolutionStepValue (POROSITY); /*reading from kratos database*/
            double& a = inode-> FastGetSolutionStepValue (LIN_DARCY_COEF); /*changing kratos database*/
            double& b = inode-> FastGetSolutionStepValue (NONLIN_DARCY_COEF); /*changing kratos database*/
            if (eps == 1.0)
            {
                a = 0;
                b = 0;
            }
        }
    }
    mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() ); /*filling edgebased database reading from kratos database*/
    mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() ); /*filling edgebased database reading from kratos database*/
}

private:
// molecular (laminar) viscosity
double mMolecularViscosity;
// wall-reduction multipliers for corner and edge nodes
double mcorner_coefficient;
double medge_coefficient;
double mmax_dt;
// edge-based matrix container (CSR graph, edge operators, lumped masses)
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
// number of substeps used when convecting the distance function
int mnumsubsteps;
bool muse_mass_correction;
//parameters controlling the wall law
bool mWallLawIsActive;
double mY_wall;
//parameters for controlling the usage of the delta time in the stabilization
double mstabdt_pressure_factor;
double mstabdt_convection_factor;
double medge_detection_angle;
double mtau2_factor;
bool massume_constant_dp;
//nodal values
ValuesVectorType mViscosity;
//velocity vector U at time steps n and n+1
CalcVectorType mWork, mvel_n, mvel_n1, mx, macc;
//pressure vector p at time steps n and n+1
ValuesVectorType mPn, mPn1;
//coefficients
ValuesVectorType mdistances;
//minimum length of the edges surrounding each nodal point
ValuesVectorType mHmin;
ValuesVectorType mHavg;
CalcVectorType mEdgeDimensions;
//area normal
CalcVectorType mSlipNormal;
CalcVectorType mInOutNormal;
//projection terms
CalcVectorType mPi, mXi;
//flag for first time step
bool mFirstStep;
//flag to differentiate interior and boundary nodes
ValuesVectorType mNodalFlag;
ValuesVectorType mWallReductionFactor;
//lists of nodes with different types of boundary conditions
IndicesVectorType mSlipBoundaryList, mPressureOutletList, mFixedVelocities, mInOutBoundaryList,mDistanceBoundaryList;
ValuesVectorType mDistanceValuesList;
CalcVectorType mFixedVelocitiesValues;
// ValuesVectorType mPressureOutlet;
//intrinsic time step size
ValuesVectorType mTauPressure;
ValuesVectorType mTauConvection;
ValuesVectorType mTau2;
ValuesVectorType mdiv_error;
boost::numeric::ublas::vector<bool> mis_slip;
boost::numeric::ublas::vector<int> mis_visited;
//variables for resolving pressure equation
//laplacian matrix
TSystemMatrixType mL;
//constant variables
double mRho;
array_1d<double, TDim> mBodyForce;
//variables for convection
ValuesVectorType mphi_n;
ValuesVectorType mphi_n1;
CalcVectorType mPiConvection;
ValuesVectorType mBeta;
//variables for edge BCs
IndicesVectorType medge_nodes;
CalcVectorType medge_nodes_direction;
IndicesVectorType mcorner_nodes;
ValuesVectorType mEps;
ValuesVectorType mdiag_stiffness;
// ValuesVectorType mD;
ValuesVectorType mA;
ValuesVectorType mB;
double mdelta_t_avg;
double max_dt;
double mshock_coeff;

//***********************************************************
//functions to calculate area normals for boundary conditions
// 2D: length-weighted outward normal of a segment condition, stored in NORMAL
void CalculateNormal2D (ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal)
{
    Geometry<Node < 3 > >& face_geometry = (cond_it)->GetGeometry();
    area_normal[0] = face_geometry[1].Y() - face_geometry[0].Y();
    area_normal[1] = - (face_geometry[1].X() - face_geometry[0].X() );
    area_normal[2] = 0.00;
    noalias ( (cond_it)->GetValue (NORMAL) ) = area_normal;
}

// 3D: area normal of a triangular condition from the cross product of two
// edge vectors (v1, v2 are scratch storage supplied by the caller)
void CalculateNormal3D (ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal, array_1d<double, 3 > & v1, array_1d<double, 3 > & v2)
{
    Geometry<Node < 3 > >& face_geometry = (cond_it)->GetGeometry();
    v1[0] = face_geometry[1].X() - face_geometry[0].X();
    v1[1] = face_geometry[1].Y() - face_geometry[0].Y();
    v1[2] = face_geometry[1].Z() -
face_geometry[0].Z();
    v2[0] = face_geometry[2].X() - face_geometry[0].X();
    v2[1] = face_geometry[2].Y() - face_geometry[0].Y();
    v2[2] = face_geometry[2].Z() - face_geometry[0].Z();
    MathUtils<double>::CrossProduct (area_normal, v1, v2);
    area_normal *= -0.5; // -0.5: triangle area with the sign convention used by this solver
    noalias ( (cond_it)->GetValue (NORMAL) ) = area_normal;
}

//*********************************************************
//function to calculate minimum length of surrounding edges
// Fills mHmin (minimum surrounding edge length, taken from the matrix
// container), mHavg (average nodal size derived from the lumped mass) and
// the edge direction vectors mEdgeDimensions.
void CalculateEdgeLengths (ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY

    //get number of nodes
    unsigned int n_nodes = rNodes.size();

    //reserve memory for storage of nodal coordinates
    std::vector< array_1d<double, TDim > > position;
    position.resize (n_nodes);

    //get position of all nodes
    for (typename ModelPart::NodesContainerType::iterator node_it = rNodes.begin(); node_it != rNodes.end(); node_it++)
    {
        //get the global index of the node
        unsigned int i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue (AUX_INDEX) );
        //save its coordinates locally
        noalias (position[i_node]) = node_it->Coordinates();
        //initialize minimum edge length with relatively big values
        // mHmin[i_node] = 1e10;
    }

    ValuesVectorType& aaa = mr_matrix_container.GetHmin();
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        mHmin[i_node] = aaa[i_node];
        if (aaa[i_node] == 0.0)
            KRATOS_THROW_ERROR (std::logic_error,"found a 0 hmin on node",i_node);
    }

    //take unstructured meshes into account
    // h ~ (lumped mass)^(1/TDim): sqrt(2 m) in 2D, (6 m)^(1/3) in 3D
    if (TDim == 2)
    {
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& h_i = mHavg[i_node];
            double& m_i = mr_matrix_container.GetLumpedMass() [i_node];
            // double& rho_i = mRho[i_node];
            h_i = sqrt (2.0 * m_i);
        }
    }
    else if (TDim == 3)
    {
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& h_i = mHavg[i_node];
            double& m_i = mr_matrix_container.GetLumpedMass() [i_node];
            // double& rho_i = mRho[i_node];
            h_i = pow (6.0 * m_i, 1.0 / 3.0);
        }
    }

    //compute edge coordinates
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
array_1d<double, TDim > & pos_i = position[i_node];
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
            array_1d<double, TDim > & pos_j = position[j_neighbour];
            array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
            // edge vector from node j to node i
            for (unsigned int comp = 0; comp < TDim; comp++)
                l_k[comp] = pos_i[comp] - pos_j[comp];
        }
    }
    KRATOS_CATCH ("")
}

//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
// (note: despite the historical name, this assembles the RHS of the SCALAR
// convection equation used to transport the distance function: edge-based
// convection operator + stabilization + shock capturing; only nodes flagged
// in 'active_nodes' contribute)
void CalculateRHS_convection (
    const ValuesVectorType& mphi,
    const CalcVectorType& convective_velocity,
    ValuesVectorType& rhs,
    ValuesVectorType& active_nodes
)
{
    KRATOS_TRY

    int n_nodes = mphi.size();

    // //calculating the convective projection
    //#pragma omp parallel for
    // for (int i_node = 0; i_node < n_nodes; i_node++)
    // {
    //
    //     double& pi_i = mPiConvection[i_node];
    //     const double& phi_i = mphi[i_node];
    //
    //     //set to zero the projection
    //     pi_i = 0;
    //     if (active_nodes[i_node] != 0.0)
    //     {
    //
    //         const array_1d<double, TDim>& a_i = convective_velocity[i_node];
    //
    //         //loop to all the edges surrounding node I
    //         for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
    //         {
    //             unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
    //
    //             if (active_nodes[j_neighbour] != 0.0)
    //             {
    //                 const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
    //                 const double& phi_j = mphi[j_neighbour];
    //
    //                 CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
    //
    //                 edge_ij.Add_ConvectiveContribution(pi_i, a_i, phi_i, a_j, phi_j);
    //             }
    //         }
    //
    //         //apply inverted mass matrix
    //         const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
    //         pi_i *= m_inv;
    //     }
    // }
    // // KRATOS_WATCH(pi_i);
// // num = fabs(num); // // if(num > norm_vI*0.0001) // // mBeta[i_node] = 1.0 - num/denom; // // else // // mBeta[i_node] = 1.0; // // } //perform MPI syncronization //calculating the RHS double stab_low; double stab_high; array_1d<double, TDim> a_i; array_1d<double, TDim> a_j; #pragma omp parallel for private(stab_low,stab_high,a_i,a_j) for (int i_node = 0; i_node < n_nodes; i_node++) { double& rhs_i = rhs[i_node]; const double& h_i = mHavg[i_node]; const double& phi_i = mphi[i_node]; noalias (a_i) = convective_velocity[i_node]; a_i /= mEps[i_node]; const array_1d<double, TDim>& proj_i = mPiConvection[i_node]; // const double& pi_i = mPiConvection[i_node]; double pi_i = proj_i[0] * a_i[0]; for (unsigned int l_comp = 1; l_comp < TDim; l_comp++) pi_i += proj_i[l_comp] * a_i[l_comp]; // double beta = mBeta[i_node]; rhs_i = 0.0; if (active_nodes[i_node] != 0.0) { const double& beta = mBeta[i_node]; double norm_a = a_i[0] * a_i[0]; for (unsigned int l_comp = 1; l_comp < TDim; l_comp++) norm_a += a_i[l_comp] * a_i[l_comp]; norm_a = sqrt (norm_a); //loop to all the edges surrounding node I for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; if (active_nodes[j_neighbour] != 0.0) { //double& rhs_j = rhs[j_neighbour]; const double& phi_j = mphi[j_neighbour]; noalias (a_j) = convective_velocity[j_neighbour]; a_j /= mEps[j_neighbour]; // const double& pi_j = mPiConvection[j_neighbour]; const array_1d<double, TDim>& proj_j = mPiConvection[j_neighbour]; double pi_j = proj_j[0] * a_i[0]; for (unsigned int l_comp = 1; l_comp < TDim; l_comp++) pi_j += proj_j[l_comp] * a_i[l_comp]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; //convection operator edge_ij.Sub_ConvectiveContribution (rhs_i, a_i, phi_i, a_j, phi_j); //esto funciona // edge_ij.Sub_D_v(rhs_i, a_i*phi_i, 
a_i*phi_j); //calculate stabilization part edge_ij.CalculateConvectionStabilization_LOW (stab_low, a_i, phi_i, a_j, phi_j); double edge_tau = mTauConvection[i_node]; edge_ij.CalculateConvectionStabilization_HIGH (stab_high, a_i, pi_i, a_j, pi_j); edge_ij.Sub_StabContribution (rhs_i, edge_tau, 1.0, stab_low, stab_high); double coeff = 0.5 * mshock_coeff; //=0.7*0.5; double laplacian_ij = 0.0; edge_ij.CalculateScalarLaplacian (laplacian_ij); double capturing = laplacian_ij * (phi_j - phi_i); // rhs_i-= coeff*capturing*beta*norm_a*h_i; double aaa = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) for (unsigned int m_comp = 0; m_comp < TDim; m_comp++) aaa += a_i[k_comp] * a_i[m_comp] * edge_ij.LaplacianIJ (k_comp, m_comp); if (norm_a > 1e-10) { aaa /= (norm_a * norm_a); double capturing2 = aaa * (phi_j - phi_i); if (fabs (capturing) > fabs (capturing2) ) rhs_i -= coeff * (capturing - capturing2) * beta * norm_a * h_i; } } } } // KRATOS_WATCH(rhs_i); } // int inout_size = mInOutBoundaryList.size(); // //#pragma omp parallel for firstprivate(slip_size) // for (int i = 0; i < inout_size; i++) // { // unsigned int i_node = mInOutBoundaryList[i]; // double dist = mdistances[i_node]; // if (dist <= 0.0) // { // const array_1d<double, TDim>& U_i = mvel_n1[i_node]; // const array_1d<double, TDim>& an_i = mInOutNormal[i_node]; // double projection_length = 0.0; // double Ain = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // { // projection_length += U_i[comp] * an_i[comp]; // Ain += an_i[comp]*an_i[comp]; // } // // double& rhs_i = rhs[i_node]; // // rhs_i += projection_length * mphi[i_node]; // } // } // int inout_size = mInOutBoundaryList.size(); // double vol_var = 0.0; // //#pragma omp parallel for firstprivate(slip_size) // for (int i = 0; i < inout_size; i++) // { // unsigned int i_node = mInOutBoundaryList[i]; // double dist = mdistances[i_node]; // // if (dist <= 0.0) // // { // const array_1d<double, TDim>& U_i = mvel_n1[i_node]; // const 
array_1d<double, TDim>& an_i = mInOutNormal[i_node]; // double A = norm_2(an_i); // // double projection_length = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // { // projection_length += U_i[comp] * an_i[comp]; // } // // double& rhs_i = rhs[i_node]; // // if(projection_length > 0) //outlet // // rhs_i += A; // // else // rhs_i -= A; // // // } // } KRATOS_CATCH ("") } //************************************** void CornerDectectionHelper (Geometry< Node < 3 > >& face_geometry, const array_1d<double, 3 > & face_normal, const double An, const GlobalPointersVector<Condition>& neighb, const unsigned int i1, const unsigned int i2, const unsigned int neighb_index, std::vector<unsigned int>& edge_nodes, CalcVectorType& cornern_list ) { double acceptable_angle = 45.0 / 180.0 * 3.1; //angles of less than 45 deg will be accepted double acceptable_cos = cos (acceptable_angle); if (face_geometry[i1].Id() < face_geometry[i2].Id() ) //we do this to add the face ones { const array_1d<double, 3 > & neighb_normal = neighb[neighb_index].GetValue (NORMAL); double neighb_An = norm_2 (neighb_normal); double cos_normal = 1.0 / (An * neighb_An) * inner_prod (face_normal, neighb_normal); //if the angle is too big between the two normals then the edge in the middle is a corner if (cos_normal < acceptable_cos) { array_1d<double, 3 > edge = face_geometry[i2].Coordinates() - face_geometry[i1].Coordinates(); double temp = norm_2 (edge); edge /= temp; int index1 = face_geometry[i1].FastGetSolutionStepValue (AUX_INDEX); int index2 = face_geometry[i2].FastGetSolutionStepValue (AUX_INDEX); edge_nodes[index1] += 1; edge_nodes[index2] += 1; // double sign1 = inner_prod (cornern_list[index1], edge); double sign1 = 0.0; for(unsigned int i = 0 ; i < edge.size() ; i++) {sign1 += cornern_list[index1][i]*edge[i];} if (sign1 >= 0) { for(unsigned int i = 0 ; i < edge.size() ; i++) cornern_list[index1][i] += edge[i]; } else { for(unsigned int i = 0 ; i < edge.size() ; i++) cornern_list[index1][i] 
-= edge[i]; } double sign2 = inner_prod(cornern_list[index2], edge); if (sign2 >= 0) { for(unsigned int i = 0 ; i < edge.size() ; i++) cornern_list[index2][i] += edge[i]; } else { for(unsigned int i = 0 ; i < edge.size() ; i++) cornern_list[index2][i] -= edge[i]; } } } } //function to calculate the area normals void DetectEdges3D (ModelPart::ConditionsContainerType& rConditions) { KRATOS_TRY //calculate area normals face-by-face array_1d<double, 3 > area_normal; //(re)initialize normals unsigned int n_nodes = mNodalFlag.size(); std::vector<unsigned int> temp_edge_nodes (n_nodes); CalcVectorType temp_cornern_list (n_nodes); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { temp_edge_nodes[i_node] = 0.0; noalias (temp_cornern_list[i_node]) = ZeroVector (TDim); } //loop over all faces // const double node_factor = 1.0 / TDim; for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++) { //get geometry data of the face Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry(); //reference for area normal of the face const array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL); double An = norm_2 (face_normal); unsigned int current_id = cond_it->Id(); //slip condition if (cond_it->GetValue (IS_STRUCTURE) == 1.0) //this is a slip face --> now look for its neighbours { const GlobalPointersVector<Condition>& neighb = cond_it->GetValue (NEIGHBOUR_CONDITIONS); //check for neighbour zero if (neighb[0].Id() != current_id) //check if the neighbour exists CornerDectectionHelper (face_geometry, face_normal, An, neighb, 1, 2, 0, temp_edge_nodes, temp_cornern_list); //check for neighbour one if (neighb[1].Id() != current_id) //check if the neighbour exists CornerDectectionHelper (face_geometry, face_normal, An, neighb, 2, 0, 1, temp_edge_nodes, temp_cornern_list); //check for neighbour two if (neighb[2].Id() != current_id) //check if the neighbour exists CornerDectectionHelper (face_geometry, 
face_normal, An, neighb, 0, 1, 2, temp_edge_nodes, temp_cornern_list); } } // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); // mr_matrix_container.WriteVectorToDatabase(ACCELERATION, temp_cornern_list, rNodes); //fill the list of edge_nodes std::vector<unsigned int> tempmedge_nodes; std::vector< array_1d<double,TDim> > tempmedge_nodes_direction; std::vector<unsigned int> tempmcorner_nodes; for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { if (temp_edge_nodes[i_node] == 2) //node is a edge_node { tempmedge_nodes.push_back (i_node); array_1d<double, TDim>& node_edge = temp_cornern_list[i_node]; node_edge /= norm_2 (node_edge); tempmedge_nodes_direction.push_back (node_edge); } else if (temp_edge_nodes[i_node] > 2) tempmcorner_nodes.push_back (i_node); } medge_nodes.resize (tempmedge_nodes.size(),false); medge_nodes_direction.resize (tempmedge_nodes_direction.size(),false); mcorner_nodes.resize (tempmcorner_nodes.size(),false); #pragma omp parallel for for (int i = 0; i < static_cast<int> (tempmedge_nodes.size() ); i++) { medge_nodes[i] = tempmedge_nodes[i]; medge_nodes_direction[i] = tempmedge_nodes_direction[i]; } #pragma omp parallel for for (int i = 0; i < static_cast<int> (tempmcorner_nodes.size() ); i++) { mcorner_nodes[i] = tempmcorner_nodes[i]; } for (unsigned int i = 0; i < mcorner_nodes.size(); i++) { KRATOS_WATCH (mcorner_nodes[i]); } KRATOS_CATCH ("") } // double ComputePorosityCoefficient(const double& viscosity, const double& vel_norm, const double& eps, const double& d) // { // // const double d = 0.01; //to be changed // double linear; // double non_linear; // if (eps < 1.0) // { // double k_inv = 150.0 * (1.0 - eps)*(1.0 - eps) / (eps * eps * eps * d * d); // linear = eps * viscosity * k_inv; // non_linear = (1.75 * vel_norm) * sqrt(k_inv / (150.0 * eps)); // // double linear = viscosity * k_inv; // // double non_linear = (1.75 * vel_norm / eps) * sqrt(k_inv / (150.0 * eps)); // } // else // { // linear = 0.0; // non_linear 
= 0.0; // } // return linear + non_linear; // } double ComputePorosityCoefficient (const double& vel_norm, const double& eps, const double& a, const double& b) { double linear; double non_linear; linear = eps * a; non_linear = eps * b * vel_norm; return linear + non_linear; } void LaplacianSmooth (ValuesVectorType& to_be_smoothed, ValuesVectorType& aux) { ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { double dist = mdistances[i_node]; double correction = 0.0; const double& origin_i = to_be_smoothed[i_node]; if (dist <= 0.0) //node is inside domain ---- if outside do nothing { for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const double& origin_j = to_be_smoothed[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; double l_ikjk; edge_ij.CalculateScalarLaplacian (l_ikjk); correction += l_ikjk * (origin_j - origin_i); } } aux[i_node] = origin_i - correction; } #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) to_be_smoothed[i_node] = aux[i_node]; } void ComputeWallResistance ( const CalcVectorType& vel, ValuesVectorType& diag_stiffness // CalcVectorType& rhs ) { //parameters: // double k = 0.41; // double B = 5.1; // double density = mRho; // double toll = 1e-6; double ym = mY_wall; //0.0825877; //0.0093823 // double y_plus_incercept = 10.9931899; // unsigned int itmax = 100; if (mViscosity[0] == 0) KRATOS_THROW_ERROR (std::logic_error, "it is not possible to use the wall law with 0 viscosity", ""); /* //slip condition int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for firstprivate(slip_size,B,toll,ym,y_plus_incercept,itmax) for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int 
i_node = mSlipBoundaryList[i_slip]; double dist = mdistances[i_node]; if (dist <= 0.0) { double nu = mViscosity[i_node]; //array_1d<double, TDim>& rhs_i = rhs[i_node]; const array_1d<double, TDim>& U_i = vel[i_node]; const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; //compute the modulus of the velocity double mod_vel = 0.0; double area = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { mod_vel += U_i[comp] * U_i[comp]; area += an_i[comp] * an_i[comp]; } mod_vel = sqrt(mod_vel); area = sqrt(area); //now compute the skin friction double mod_uthaw = sqrt(mod_vel * nu / ym); double y_plus = ym * mod_uthaw / nu; if (y_plus > y_plus_incercept) { //begin cicle to calculate the real u_thaw's module: unsigned int it = 0; double dx = 1e10; // KRATOS_WATCH(fabs(dx)); while ( (fabs(dx) > toll * mod_uthaw) && (it < itmax) ) { double a = 1.0 / k; double temp = a * log(ym * mod_uthaw / nu) + B; double y = mod_uthaw * (temp) - mod_vel; double y1 = temp + a; dx = y / y1; mod_uthaw -= dx; it = it + 1; } if (it == itmax) std::cout << "attention max number of iterations exceeded in wall law computation" << std::endl; } double tau = mod_uthaw * mod_uthaw ; tau *= mWallReductionFactor[i_node]; if (mod_vel > 1e-9) diag_stiffness[i_node] = tau * area / mod_vel;*/ /* int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for firstprivate(slip_size,B,toll,ym,y_plus_incercept,itmax) for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int i_node = mSlipBoundaryList[i_slip]; double dist = mdistances[i_node]; if (dist <= 0.0) { double nu = mViscosity[i_node]; //array_1d<double, TDim>& rhs_i = rhs[i_node]; const array_1d<double, TDim>& U_i = vel[i_node]; const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; //compute the modulus of the velocity double mod_vel = 0.0; double area = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { mod_vel += U_i[comp] * U_i[comp]; area += an_i[comp] * an_i[comp]; } mod_vel = sqrt (mod_vel); area = sqrt (area); 
diag_stiffness[i_node] = area * mod_vel /pow(1.0/k*log(100) + B,2) * mWallReductionFactor[ i_node ]; } else diag_stiffness[i_node] = 0.0; }*/ //slip condition int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for firstprivate(slip_size,ym) for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int i_node = mSlipBoundaryList[i_slip]; double dist = mdistances[i_node]; if (dist <= 0.0) { double nu = mMolecularViscosity; //mViscosity[i_node]; //array_1d<double, TDim>& rhs_i = rhs[i_node]; const array_1d<double, TDim>& U_i = vel[i_node]; const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; //compute the modulus of the velocity double mod_vel = 0.0; double area = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { mod_vel += U_i[comp] * U_i[comp]; area += an_i[comp] * an_i[comp]; } mod_vel = sqrt (mod_vel); area = sqrt (area); //the 0.1 is such that the dissipation is as for the linear case for a velocity of 10m/s diag_stiffness[i_node] = area * nu * mod_vel/ (ym ) * mWallReductionFactor[ i_node ] ; } else { diag_stiffness[i_node] = 0.0 ; } } // //apply higher resistance normally to the edges // int edge_size = medge_nodes_direction.size(); // #pragma omp parallel for firstprivate(edge_size) // for (int i = 0; i < edge_size; i++) // { // int i_node = medge_nodes[i]; // double dist = mdistances[i_node]; // // if(dist <= 0.0) // { // double nu = mViscosity[i_node]; // const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; // // //compute the modulus of the velocity // double area = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // { // area += an_i[comp] * an_i[comp]; // } // area = sqrt (area); // // diag_stiffness[i_node] += area * nu / (ym ) ; // // } // } // // int corner_size = mcorner_nodes.size(); // for (int i = 0; i < corner_size; i++) // { // int i_node = mcorner_nodes[i]; // double nu = mViscosity[i_node]; // mWallReductionFactor[i_node] = mcorner_coefficient; //50.0; // const double m = 
mr_matrix_container.GetLumpedMass()[i_node]; // diag_stiffness[i_node] += 100.0*m * nu / (ym ) ; // } } void ApplySmagorinsky3D (double MolecularViscosity, double Cs) { KRATOS_TRY ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); //calculating the RHS array_1d<double, TDim> grad_vx; array_1d<double, TDim> grad_vy; array_1d<double, TDim> grad_vz; int n_nodes = rNodes.size(); mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes); array_1d<double, TDim> stab_high; #pragma omp parallel for private(grad_vx,grad_vy,grad_vz) for (int i_node = 0; i_node < n_nodes; i_node++) { //set to zero the gradients for (unsigned int comp = 0; comp < TDim; comp++) { grad_vx[comp] = 0.0 ; grad_vy[comp] = 0.0 ; grad_vz[comp] = 0.0 ; } //compute node by node the gradients const array_1d<double, TDim>& U_i = mvel_n1[i_node]; const double h = mHmin[i_node]; const double m_inv = mr_matrix_container.GetInvertedMass() [i_node]; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; edge_ij.Add_grad_p (grad_vx, U_i[0], U_j[0]); edge_ij.Add_grad_p (grad_vy, U_i[1], U_j[1]); edge_ij.Add_grad_p (grad_vz, U_i[2], U_j[2]); } //finalize computation of the gradients //set to zero the gradients for (unsigned int comp = 0; comp < TDim; comp++) { grad_vx[comp] *= m_inv ; grad_vy[comp] *= m_inv ; grad_vz[comp] *= m_inv ; } //symmetrize and multiply by 2 grad_vx[0] *= 2.0; grad_vy[1] *= 2.0; if(TDim > 2) grad_vz[2] *= 2.0; grad_vx[1] += grad_vy[0]; if(TDim > 2) grad_vx[2] += grad_vz[0]; if(TDim > 2) grad_vy[2] += grad_vz[1]; grad_vy[0] += grad_vx[1]; grad_vz[0] += grad_vx[2]; grad_vz[1] += grad_vy[2]; //compute smagorinsky term double aux = 0.0; for (unsigned int comp = 0; 
comp < TDim; comp++) { aux += grad_vx[comp] * grad_vx[comp] ; aux += grad_vy[comp] * grad_vy[comp] ; aux += grad_vz[comp] * grad_vz[comp] ; } aux *= 0.5; if (aux < 0.0 ) aux=0.0; double turbulent_viscosity = Cs*h*h*sqrt (aux) /**MolecularViscosity*/; // KRATOS_WATCH(aux); // KRATOS_WATCH(turbulent_viscosity); mViscosity[i_node] = turbulent_viscosity + MolecularViscosity; } mr_matrix_container.WriteScalarToDatabase (VISCOSITY, mViscosity, rNodes); KRATOS_CATCH (""); } void Add_Effective_Inverse_Multiply ( CalcVectorType& destination, const CalcVectorType& origin1, const double value, const ValuesVectorType& mass, const ValuesVectorType& diag_stiffness, const CalcVectorType& origin ) { KRATOS_TRY int loop_size = destination.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { array_1d<double, TDim>& dest = destination[i_node]; const double m = mass[i_node]; const double d = diag_stiffness[i_node]; const array_1d<double, TDim>& origin_vec1 = origin1[i_node]; const array_1d<double, TDim>& origin_value = origin[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) dest[comp] = value / (m + value*d) * ( m/value * origin_vec1[comp] + origin_value[comp] ); } KRATOS_CATCH ("") } void ComputeConvectiveProjection ( CalcVectorType& mPiConvection, const ValuesVectorType& mphi_n1, const ValuesVectorType& mEps, const CalcVectorType& mvel_n1 ) { int n_nodes = mPiConvection.size(); //calculating the convective projection array_1d<double, TDim> a_i; array_1d<double, TDim> a_j; #pragma omp parallel for private(a_i,a_j) for (int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& pi_i = mPiConvection[i_node]; // setting to zero the projection for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] = 0.0; /* if (active_nodes[i_node] != 0.0) {*/ const double& phi_i = mphi_n1[i_node]; noalias (a_i) = mvel_n1[i_node]; a_i /= mEps[i_node]; // loop to all the edges surrounding node I for (unsigned int csr_index = 
mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; noalias (a_j) = mvel_n1[j_neighbour]; a_j /= mEps[j_neighbour]; const double& phi_j = mphi_n1[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; edge_ij.Add_grad_p (pi_i, phi_i, phi_j); // if(i_node == 3255) // { // KRATOS_WATCH(j_neighbour) // KRATOS_WATCH(pi_i) // KRATOS_WATCH(mEps[i_node]) // KRATOS_WATCH(mEps[j_neighbour]) // KRATOS_WATCH(phi_i) // KRATOS_WATCH(phi_j) // KRATOS_WATCH(a_i) // KRATOS_WATCH(a_j) // KRATOS_WATCH(mr_matrix_container.GetInvertedMass()[i_node]) // KRATOS_WATCH(edge_ij.Ni_DNj) // // } } // apply inverted mass matrix const double m_inv = mr_matrix_container.GetInvertedMass() [i_node]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] *= m_inv; // std::cout << i_node << " " << pi_i << " " << mvel_n1[i_node] << " " << phi_i <<std::endl; // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // if(std::isnan(pi_i[l_comp])) // KRATOS_WATCH(m_inv); // } } } void ComputeLimitor ( CalcVectorType& mPiConvection, const ValuesVectorType& mphi_n1, ValuesVectorType& mBeta, const CalcVectorType& mvel_n1, const CalcVectorType& mEdgeDimensions ) { int n_nodes = mPiConvection.size(); #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { const array_1d<double, TDim>& pi_i = mPiConvection[i_node]; const double& p_i = mphi_n1[i_node]; double& beta_i = mBeta[i_node]; beta_i = 0.0; double n = 0.0; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const double& p_j = mphi_n1[j_neighbour]; const array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index]; const array_1d<double, TDim>& pi_j = 
mPiConvection[j_neighbour]; // double proj = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // proj += 0.5*l_k[comp]*(pi_i[comp]+pi_j[comp]); // double beta = fabs((p_i - p_j - proj)/(fabs(p_i-p_j)+fabs(proj)+1e-4)); double proj = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) proj += 0.5 * l_k[comp]* (pi_i[comp] + pi_j[comp]); // proj += dir[comp]*pi_i[comp]; double numerator = fabs (fabs (p_j - p_i) - fabs (proj) ); double denom = fabs (fabs (p_j - p_i) + 1e-6); beta_i += numerator / denom; n += 1.0; } beta_i /= n; if (beta_i > 1.0) beta_i = 1.0; } } }; } //namespace Kratos #undef SYMM_PRESS #endif //KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED defined
GB_unop__ainv_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__ainv_fp64_fp64)
// op(A') function:  GB (_unop_tran__ainv_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse: z = -x)
#define GB_OP(z, x) \
    z = -x ;

// casting (no-op here: input and output types are both double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = -z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__ainv_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = -z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__ainv_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Common.h
#ifndef _COMMON_ #define _COMMON_ #include "zeroin.h" #include "average.h" #include <map> #include <vector> #ifdef _STRSTREAM #include <strstream> #endif using namespace std; typedef vector<int>::size_type vint; int Binomial(int n, int m) { int Mf = 1; for (int i=2; i<=m; i++) Mf *= i; int r = 1; for (int i=n; i>=n-m+1; i--) r*=i; return r/Mf; } //Common constants and variables class common{ public: static double U; static double T; // static double J; static int baths; static int Na, Nc; static function1D<int> Ns; static function2D<double> Ms; static function1D<int> Mtot; static function1D<int> deg; static function1D<double> sJc; static vector<vector<map<int,double> > > sncab; // index for hole diagrams static vector<vector<map<int,double> > > sncaf; // index for particle diagrams static vector<map<int,double> > suscb; // index for susceptibility static function2D<int> ncab; // index for hole diagrams static function2D<int> ncaf; // index for particle diagrams static function2D<double> prefactb; // prefactor for hole digrams static function2D<double> prefactf; // prefactor for particle diagrams static function2D<double> prefactG; // prefactor to calculate local Green's function static function1D<double> Ed; static function1D<double> Sinfty; static function1D<double> nalpha; static function1D<double> miss_nd; static function2D<double> moment; static double beta; static double delta; static double Q; static double Q0; static double nd; static double nd0; static double lambda0; static string outdir; static int totDeg; static function1D<string> Eds; static int N_ac; static double dom_ac; static int acore, pcore; static bool SubtractLorentz; static double LorentzMaxRatio; static double SearchLorentz; static int FirstLorentz; static int LastLorentz; static double dlmin; static bool renorm_core, renorm; static bool cmp_susc; static double Fimp, Epot, TrLogGimp; static void SetParameters(Par<double>& Ed_, double U_, /*double J_, */double T_, double Q0_, const string& 
outdir_, int N_ac_, double dom_ac_, int acore_, int pcore_, bool SubtractLorentz_, double SearchLorentz_, double LorentzMaxRatio_, int FirstLorentz_, int LastLorentz_, bool renorm_core_, bool renorm_) { dlmin = 2.0; LorentzMaxRatio = LorentzMaxRatio_; SearchLorentz = SearchLorentz_; SubtractLorentz=SubtractLorentz_; FirstLorentz=FirstLorentz_; // First pseudoparticle which could be augmented with lorentz LastLorentz=LastLorentz_; // Last pseudoparticle which could be augmented with lorentz Ed.resize(baths); int i=0; while (Ed_.next() && i<baths) { Ed[i] = Ed_; i++; } for (int j=i; j<baths; j++) Ed[j]=Ed[i-1]; T = T_; U = U_; // J = J_; beta=1/T_; Q0 = Q0_; outdir = outdir_; Eds.resize(baths); for (int i=0; i<baths; i++){ stringstream t; t<<"E"<<i; Eds[i] = t.str(); } nalpha.resize(baths); miss_nd.resize(baths); for (int i=0; i<baths; i++) miss_nd[i]=0; N_ac = N_ac_; dom_ac = dom_ac_; acore = acore_; pcore = pcore_; renorm_core=renorm_core_; renorm=renorm_; moment.resize(baths,2); Fimp=Epot=TrLogGimp=0.0; } static void ParsInputFile(const string& filename); static void PrintParsedData(ostream& stream); static ostream& printHead(ostream& stream); }; class sLorentz{ public: double x0, gamma, P; bool exist; sLorentz() : x0(0), gamma(1), P(0), exist(false){}; void Set(double zero, double eps, double a, double p, double q, double r) { exist = true; //double A = (sqr(1-p)+sqr(q))/a-2*eps*q*r/sqr(a)+sqr(eps*r/a)/a; double A = (sqr(1-p)+sqr(q))/a-2*eps*q*(r/a)/a+sqr(eps*r/a)/a; double B = eps*q/a-sqr(eps)/a*(r/a)/2; double C = sqr(eps)/a; double b2 = C/A-sqr(B/A); x0 = -B/A; gamma = (b2>0)? 
sqrt(b2) : sqrt(abs(C/A)); if (gamma==0) { exist=false; P=0; return; } //cout<<"a="<<a<<" A="<<A<<" B="<<B<<" C="<<C<<" b2="<<b2<<" gamma="<<gamma<<endl; P = 1/(A*gamma); x0 += zero; } void SetFalse(){exist=false; P=0;} private: double IntgA(double om0, double om1, double A0, double A1, double omega, double x0) const { if (!exist) return 0; if (fabs(om1-om0)*100<gamma) return P*gamma*0.5*(A0+A1)*(om1-om0)/(sqr(0.5*(om0+om1)+omega-x0)+sqr(gamma)); double c0 = om0 + omega - x0; double c1 = om1 + omega - x0; double dA = (A1-A0)/(om1-om0); if (abs(c0)>100*gamma && abs(c1)>100*gamma && c0*c1>0) return P*gamma*( (A0-dA*c0)*(1/c0-1/c1)+dA*log(c1/c0)+0.5*dA*(sqr(gamma/c1)-sqr(gamma/c0)) ); ///// HERE WAS A BUG!! Corrected Dec/6/2013. if (abs(c0)>100*gamma && abs(c1)>100*gamma && c1-c0>199.9*gamma) return P*( (A0-dA*c0)*(M_PI+gamma*(1/c0-1/c1))+dA*gamma*log(abs(c1/c0))+0.5*dA*gamma*(sqr(gamma/c1)-sqr(gamma/c0)) ); ///// HERE WAS A BUG!! Corrected Dec/6/2013. //if (abs(c0)>1 && abs(c1)>1) return P*gamma*(c1-c0)*0.5*(A1+A0)/(c1*c0); ///// HERE WAS A BUG!! Corrected Dec/6/2013. 
double a0 = c0/gamma; double a1 = c1/gamma; double R; if (fabs(gamma)<1e-30){ R= P*gamma*((A0-dA*c0)*(1/c0-1/c1)+dA*log(fabs(c1/c0))); }else{ R = P*((A0-dA*c0)*(atan(a1)-atan(a0))+0.5*gamma*dA*log((1+sqr(a1))/(1+sqr(a0)))); } if (isnan(R) || isinf(R)){ cerr<<"R is nan or inf "<<R<<" "<<om0<<" "<<om1<<" "<<A0<<" "<<A1<<" "<<omega<<" "<<x0<<" "<<c0<<" "<<c1<<endl; cerr<<"to "<<(1+sqr(a1))<<" "<<(1+sqr(a0))<<" a0="<<a0<<" a1="<<a1<<" gamma="<<gamma<<" c0="<<c0<<" c1="<<c1<<" "<<atan(a1)-atan(a0)<<" "<<(A0-dA*c0)<<" "<<log((1+sqr(a1))/(1+sqr(a0)))<<endl; } return R; } public: double IntgAp(double om0, double om1, double A0, double A1, double omega)const{ return IntgA(om0, om1, A0, A1, omega, x0);} double IntgAm(double om0, double om1, double A0, double A1, double omega)const{ return IntgA(om0, om1, A0, A1, -omega, -x0);} double IntgApLL(const sLorentz& l, double omega) const { return P*l.P*M_PI*(gamma+l.gamma)/(sqr(gamma+l.gamma)+sqr(x0-l.x0-omega)); } double V(double x){ return P*gamma/(sqr(x-x0)+sqr(gamma));} friend ostream& operator<<(ostream& stream, const sLorentz& s); }; ostream& operator<<(ostream& stream, const sLorentz& s) { if (s.exist) stream<<setw(15)<<s.x0<<" "<<setw(15)<<s.gamma<<" "<<setw(15)<<s.P<<" "; return stream; } // Auxiliary self-energies and spectral functions class Auxiliary{ const int Na, Nc, baths; mesh1D om; function1D<double> fe; function1D<double> fedh; function1D<double> logo; function2D<double> Sigt; function2D<double> Sigtn; function2D<dcomplex> Sigc; function2D<dcomplex> Sigcore; function2D<double> Gt; function2D<double> Gp; function2D<double> Gm; vector<function2D<double> > aAc; function1D<double> Acx; function1D<double> Acy; function2D<double> Acp, Acm; AvFun<double> aF; function1D<double> Energy; function1D<double> Probability; mesh1D oml; function2D<dcomplex> Deltam_ac, Deltap_ac; function1D<double> mom_Deltam_ac, mom_Deltap_ac; function1D<dcomplex> Sigtmp; int mpos, m0, m1; function2D<double> GtA1, GtA2; vector<sLorentz> lorentzm, 
lorentzp; public: Auxiliary (int Na_, int Nc_, int baths_) : Na(Na_), Nc(Nc_), baths(baths_), aAc(2*baths), mom_Deltam_ac(baths), mom_Deltap_ac(baths), lorentzm(Na), lorentzp(Na),Probability(Na){}; bool ReadSelfEnergy(const string& filename, const Par<double>& Ed, const Par<double>& T, const Par<double>& U, const mesh1D& ph_omd, const function2D<double>& ph_Ac); void KramarsKronig(); double DeterminSpectralFunctions(double StartLambda, double EndLambda, double dLamdba, int followPeak); void PrintOutMeanQ(double StartLambda, double EndLambda); void PrintNorm(ostream& stream); void Print(int l, string dir); void Printn(int l); void SetSignToZero(){Sigtn=0.0;Sigcore=0.0;} void SetUpAverageAc(const mesh1D& omd, const mesh1D& momd, const function2D<double>& Ack, const function1D<double>& fed); void CalcSigmab(const mesh1D& omd); void CalcSigmaf(const mesh1D& omd); double Difference(); double DeterminSelfEnergies(double alpha, int CmpDiff); const mesh1D& omega() const {return om;} double ferm(int i) const {return fe[i];} const function2D<double>& _Gp() const {return Gp;} const function2D<double>& _Gm() const {return Gm;} void PrintSign(); double Q(double lambda); double operator()(double lambda); double minEnergy; void PrintCore(const string& filename); const function1D<double>& Energ() const{return Energy;} const vector<sLorentz>& Lorentzm()const{return lorentzm;} const vector<sLorentz>& Lorentzp()const{return lorentzp;} void CreateSigma000(const mesh1D& omd, const function2D<double>& Ac); private: void Print_aAc(int l); void Print_Qux(int l); void Print_Sign(int l, int st, int en); void PrintOutMeanQ(int M, double StartLambda, double EndLambda); }; // Physical electron spectral function and suscpetibility // Physical observables class Physical{ public: const int Na, Nc, baths; mesh1D omd; function2D<dcomplex> G00; function2D<double> A00; function1D<double> C00; function1D<dcomplex> Chi; function2D<double> A00c; function2D<dcomplex> Sig; private: mesh1D momd; 
function1D<double> fed; function1D<double> logod; function1D<double> th; function2D<double> Ac; function2D<dcomplex> Delta0; vector<AvFun<double> > aF; function2D<double> Gtx; function2D<double> Cmp; function1D<double> tG; function1D<bool> Pexists; public: Physical(int Na_, int Nc_, int baths_); bool ReadBathFunction(const string& filename, bool spectra); void CalculateA00(const mesh1D& omega, const function2D<double>& Gp, const function2D<double>& Gm, const function1D<double>& Energy, const vector<sLorentz>& lorentzm, const vector<sLorentz>& lorentzp); void KramarsKronig(); void DeterminG00(double alpha,ostream& loging); double Difference(); void Print(int l, string dir); void Print0(const string& filename); const mesh1D& omega() const {return omd;} const mesh1D& momega() const {return momd;} const function1D<double>& fe() const {return fed;} const function2D<double>& Ac0() const {return Ac;} void PrintA00(ostream& out); void CalcSelfEnergy(); void MissingDoping(double start); private: void CalculateProducts(double u, double fu, const mesh1D& om, const function2D<double>& Gm); bool ReadBeginning(const string& filename, istream& input, int& n, int& m, bool& begincomment, double& center); }; void AverageFunction(const mesh1D& omx, double u, const mesh1D& eps, AvFun<double>& aF, functionb<double>& aAc) { apar ap; cintpar pi; tint position = omx.InitInterpLeft(); InterpLeft(eps[0]-u, omx, position, pi); aF.InterpolateFirst(pi); InterpLeft(eps[1]-u, omx, position, pi); ap.SetUpCsFirst(u, eps); aAc[0] = aF.InterpolateNext(pi, ap) * eps.Dh(0); for (int j=1; j<eps.size()-1; j++){ InterpLeft(eps[j+1]-u, omx, position, pi); ap.SetUpCs(u, j, eps, omx.Dh(pi.i)); aAc[j] = aF.InterpolateNext(pi, ap) * eps.Dh(j); } ap.SetUpCsLast(u, eps); aAc[eps.size()-1] = aF.InterpolateLast(ap) * eps.Dh(eps.size()-1); } inline double product(const double* A, const double* G, int size) { double sum = 0; for (int i=0; i<size; i++) sum += A[i]*G[i]; return sum; } void 
Auxiliary::SetUpAverageAc(const mesh1D& omd, const mesh1D& momd, const function2D<double>& Ack, const function1D<double>& fed) { int m = om.find_(0.0)+1; Acx.resize(omd.size()); for (int b=0; b<baths; b++){ aAc[b].resize(om.size(),om.size()); for (int i=0; i<omd.size(); i++) Acx[i] = Ack[b][i]*(1-fed[i]); aF.SetUp(Acx,omd); for (int i=0; i<m; i++) AverageFunction(omd,om[i],om,aF,aAc[b][i]); for (int i=0; i<omd.size(); i++) Acx[i] = Ack[b][i]*fed[i]; aF.SetUp(Acx,omd); for (int i=m; i<om.size(); i++) AverageFunction(omd,om[i],om,aF,aAc[b][i]); aAc[baths+b].resize(om.size(),om.size()); for (int i=0; i<momd.size(); i++) Acx[momd.size()-i-1] = Ack[b][i]*fed[i]; aF.SetUp(Acx,momd); for (int i=0; i<m; i++) AverageFunction(momd,om[i],om,aF,aAc[baths+b][i]); for (int i=0; i<momd.size(); i++) Acx[momd.size()-i-1] = Ack[b][i]*(1-fed[i]); aF.SetUp(Acx,momd); for (int i=m; i<om.size(); i++) AverageFunction(momd,om[i],om,aF,aAc[baths+b][i]); } // For core part need Delta in more extended range Acy.resize(omd.size()); oml.resize(omd.size()+2*common::N_ac); for (int i=0; i<common::N_ac; i++) oml[i] = omd[0]-(common::N_ac-i)*common::dom_ac; for (int i=0; i<omd.size(); i++) oml[i+common::N_ac] = omd[i]; for (int i=0; i<common::N_ac; i++) oml[omd.size()+common::N_ac+i] = omd.last()+(i+1)*common::dom_ac; oml.SetUp(omd.dcenter()); Deltam_ac.resize(baths,oml.size()); Deltap_ac.resize(baths,oml.size()); Acp.resize(baths,omd.size()); Acm.resize(baths,omd.size()); for (int b=0; b<baths; b++){ for (int i=0; i<omd.size(); i++){ Acm(b,i) = Ack[b][i]*fed[i]; Acp(b,i) = Ack[b][i]*(1-fed[i]); } int ofst=0; #pragma omp parallel for for (int i=0; i<common::N_ac; i++){ double Deltar = ::KramarsKronig(Acm[b], omd, oml[i], 0, 0.0); Deltam_ac[b][i] = dcomplex(-M_PI*Deltar,0.0); Deltar = ::KramarsKronig(Acp[b], omd, oml[i], 0, 0.0); Deltap_ac[b][i] = dcomplex(-M_PI*Deltar,0.0); } ofst=common::N_ac; #pragma omp parallel for for (int i=0; i<omd.size(); i++){ double Deltar = ::KramarsKronig(Acm[b], omd, 
omd[i], i, Acm[b][i]); Deltam_ac[b][ofst+i] = dcomplex(-M_PI*Deltar,-M_PI*Acm[b][i]); Deltar = ::KramarsKronig(Acp[b], omd, omd[i], i, Acp[b][i]); Deltap_ac[b][ofst+i] = dcomplex(-M_PI*Deltar,-M_PI*Acp[b][i]); } ofst=common::N_ac+omd.size(); #pragma omp parallel for for (int i=0; i<common::N_ac; i++){ double Deltar = ::KramarsKronig(Acm[b], omd, oml[omd.size()+common::N_ac+i], omd.size()-1, 0.0); Deltam_ac[b][ofst+i] = dcomplex(-M_PI*Deltar, 0.0); Deltar = ::KramarsKronig(Acp[b], omd, oml[omd.size()+common::N_ac+i], omd.size()-1, 0.0); Deltap_ac[b][ofst+i] = dcomplex(-M_PI*Deltar, 0.0); } double summ=0; for (int i=0; i<omd.size(); i++) summ += Acm[b][i]*omd.Dh(i); double sump=0; for (int i=0; i<omd.size(); i++) sump += Acp[b][i]*omd.Dh(i); mom_Deltam_ac[b] = summ; mom_Deltap_ac[b] = sump; } } void Auxiliary::CalcSigmab(const mesh1D& omd) { for (int b=0; b<baths; b++){ GtA1.Product(Gm,aAc[b],0,mpos); // Gm[f,eps]*Acfm[x,eps] GtA2.Product(Gp,aAc[b],mpos,aAc[b].size_N()); // Gp[f,eps]*Acfp[x,eps] if (common::SubtractLorentz){ #pragma omp parallel for for (int j=0; j<Na; j++){ if (lorentzm[j].exist){ tint pos0=omd.size()-2, pos1=omd.size()-2; double dlmin_x0 = -common::dlmin + lorentzm[j].x0; double dlmin_x1 = common::dlmin + lorentzm[j].x0; for (int i=0; i<mpos; i++){ int k0 = omd._find(dlmin_x0 - om[i], 0, pos0); int k1 = omd._find(dlmin_x1 - om[i], 0, pos1); double sum=0; for (int k=k0; k<k1; k++) sum += lorentzm[j].IntgAp(omd[k], omd[k+1], Acp(b,k), Acp(b,k+1), om[i]); GtA1(j,i) += sum; } } if (lorentzp[j].exist){ tint pos0=omd.size()-2, pos1=omd.size()-2; double dlmin_x0 = -common::dlmin + lorentzp[j].x0; double dlmin_x1 = common::dlmin + lorentzp[j].x0; for (int i=mpos; i<om.size(); i++){ int k0 = omd._find(dlmin_x0 - om[i], 0, pos0); int k1 = omd._find(dlmin_x1 - om[i], 0, pos1); double sum = 0; for (int k=k0; k<k1; k++) sum += lorentzp[j].IntgAp(omd[k], omd[k+1], Acm(b,k), Acm(b,k+1), om[i]); GtA2(j,i-mpos) += sum; } } } } #pragma omp parallel for for (int j=0; 
j<Na; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double prf = l->second/static_cast<double>(common::deg[j]); for (int i=0; i<mpos; i++) Sigtn(j,i) += prf * GtA1(ind,i)/fe[i]; for (int i=mpos; i<om.size(); i++) Sigtn(j,i) += prf * GtA2(ind,i-mpos)/(1-fe[i]); } } } } if (!common::acore) return; for (int b=0; b<baths; b++){ for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; if (ind>=Na && ind<Na+Nc){ double prf = l->second/static_cast<double>(common::deg[j]); tint position = oml.InitInterpRight(); for (int i=0; i<om.size(); i++){ double x = Energy[ind]-common::lambda0-om[i]; dcomplex Delta=0; if (x>oml.last()) Delta = mom_Deltam_ac[b]/x; else Delta = Deltam_ac[b](oml.InterpRight(x, position)); Sigcore[j][i] += prf*Delta; } } } } } } void Auxiliary::CalcSigmaf(const mesh1D& omd) { for (int b=0; b<baths; b++){ GtA1.Product(Gm,aAc[baths+b],0,mpos); GtA2.Product(Gp,aAc[baths+b],mpos,aAc[baths+b].size_N()); if (common::SubtractLorentz){ #pragma omp parallel for for (int j=0; j<Na; j++){ if (lorentzm[j].exist){ tint pos0=0, pos1=0; double dlmin_x0 = -common::dlmin - lorentzm[j].x0; double dlmin_x1 = common::dlmin - lorentzm[j].x0; for (int i=0; i<mpos; i++){ int k0 = omd.find_(dlmin_x0 + om[i], pos0); int k1 = omd.find_(dlmin_x1 + om[i], pos1); double sum = 0; //for (int k=0; k<omd.size()-1; k++) for (int k=k0; k<k1; k++) sum += lorentzm[j].IntgAm(omd[k], omd[k+1], Acm(b,k), Acm(b,k+1), om[i]); GtA1(j,i) += sum; } } if (lorentzp[j].exist){ tint pos0=0, pos1=0; double dlmin_x0 = -common::dlmin - lorentzp[j].x0; double dlmin_x1 = common::dlmin - lorentzp[j].x0; for (int i=mpos; i<om.size(); i++){ int k0 = omd.find_(dlmin_x0 + om[i], pos0); int k1 = omd.find_(dlmin_x1 + om[i], pos1); double sum = 0; // for (int k=0; k<omd.size()-1; k++) for (int k=k0; k<k1; k++) sum += 
lorentzp[j].IntgAm(omd[k], omd[k+1], Acp(b,k), Acp(b,k+1), om[i]); GtA2(j,i-mpos) += sum; } } } } #pragma omp parallel for for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double prf = l->second/static_cast<double>(common::deg[j]); for (int i=0; i<mpos; i++) Sigtn(j,i) += prf * GtA1(ind,i)/fe[i]; for (int i=mpos; i<om.size(); i++) Sigtn(j,i) += prf * GtA2(ind,i-mpos)/(1-fe[i]); } } } } if (!common::acore) return; for (int b=0; b<baths; b++){ for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){ int ind = l->first; if (ind>=Na && ind<Na+Nc){ double prf = l->second/static_cast<double>(common::deg[j]); tint position = oml.InitInterpLeft(); for (int i=0; i<om.size(); i++){ double x = om[i]-Energy[ind]+common::lambda0; dcomplex Delta=0; if (x<om[0]) Delta = mom_Deltap_ac[b]/x; else Delta = Deltap_ac[b](oml.InterpLeft(x, position)); Sigcore[j][i] += prf*Delta; } } } } } } void Auxiliary::CreateSigma000(const mesh1D& omd, const function2D<double>& Ac) {// If inteligence guess for the pseudo-particles self-energy is not found, // it creates a guess using atomic type of approximation. 
Sigt=0; for (int b=0; b<baths; b++){ for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double Em = Energy[ind]-minEnergy; double prf = l->second/static_cast<double>(common::deg[j]); tint pos = omd.InitInterpRight(); for (int i=0; i<om.size(); i++){ double ff; if (om[i]>0) ff = ferm_f((Em-om[i])/common::T)/(1-fe[i]); else{ double eom = exp(om[i]/common::T); ff = (eom+1.)/(eom+exp(Em/common::T)); } Sigt(j,i) += -M_PI*prf*ff*Ac[b](omd.InterpRight(Em-om[i],pos)); } } } for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double Em = Energy[ind]-minEnergy; double prf = l->second/static_cast<double>(common::deg[j]); tint pos = omd.InitInterpLeft(); for (int i=0; i<om.size(); i++){ double ff; if (om[i]>0) ff = ferm_f((Em-om[i])/common::T)/(1-fe[i]); else{ double eom = exp(om[i]/common::T); ff = (eom+1.)/(eom+exp(Em/common::T)); } Sigt(j,i) += -M_PI*prf*ff*Ac[b](omd.InterpLeft(om[i]-Em,pos)); } } } } } KramarsKronig(); } inline ostream& common::printHead(ostream& stream) { stream<<"# "; stream<<" nb="<<baths<<" "; //stream<<" T="<<T<<" ntot="<<nd<<" U="<<U<<" lambda0="<<lambda0<<" "; stream<<" T="<<T<<" ntot="<<nd<<" U="<<U<<" dFimpG="<<Fimp-TrLogGimp<<" Fimp="<<Fimp<<" Epot="<<Epot<<" TrLogGimp="<<TrLogGimp<<" lambda0="<<lambda0<<" "; stream<<" Ns=["; for (int i=0; i<baths-1; i++) stream<<Ns[i]<<","; stream<<Ns[baths-1]<<"] "; stream<<" Eimp=["; for (int i=0; i<baths-1; i++) stream<<Ed[i]<<","; stream<<Ed[baths-1]<<"] "; stream<<" nf=["; for (int i=0; i<baths-1; i++) stream<<nalpha[i]<<","; stream<<nalpha[baths-1]<<"] "; stream<<" md=["; for (int i=0; i<baths-1; i++) stream<<miss_nd[i]<<","; stream<<miss_nd[baths-1]<<"] "; stream<<" moment=["; for (int i=0; i<baths-1; i++) stream<<"["<<moment[i][0]<<","<<moment[i][1]<<"],"; 
stream<<"["<<moment[baths-1][0]<<","<<moment[baths-1][1]<<"]] "; if (Sinfty.size()>0){ double aS=0; for (int i=0; i<baths; i++) aS += Sinfty[i]; aS/=baths; stream<<" aSinfty="<<aS<<" "; stream<<" Sinfty=("; for (int i=0; i<baths-1; i++)stream<<Sinfty[i]<<","; stream<<Sinfty[baths-1]<<") "; } return stream; } void RememberParams (int argc, char *argv[]){ ofstream param ((common::outdir+"/history.nca").c_str(), ios::app); if (!param) cerr<<" Didn't suceeded to open params file!"<<(common::outdir+"/history.nca")<<endl; for (int i=0; i<argc; i++) param << argv[i] << " "; param << endl; } template <class T> bool ReadValue(T& a, const std::string& variable, const std::string& str){ std::string::size_type pos = str.find(variable); if (pos < std::string::npos){ std::string::size_type poseq = str.find("=",pos); if (poseq<std::string::npos){ std::istringstream streambuff(std::string(str,poseq+1)); streambuff >> a; } return true; } return false; } bool Auxiliary::ReadSelfEnergy(const string& filename, const Par<double>& Ed, const Par<double>& T, const Par<double>& U, const mesh1D& ph_omd, const function2D<double>& ph_Ac){ ifstream inputf(filename.c_str()); istream input(inputf.rdbuf()); input.seekg(0,ios::beg); if (!input) { cerr << "Can't open input file: " << filename << endl; return false; } // Is the input file started with comment? 
bool begincomment = false; int n = 0; string str; const double SpecNumber = -100000; double T_ = SpecNumber, U_ = SpecNumber; function1D<double> Ed_(baths); Ed_ = SpecNumber; double center = 0; getline(input,str); if (str.find('#')<string::npos){ begincomment = true; for (int i=0; i<baths; i++) ReadValue(Ed_[i], common::Eds[i], str); ReadValue(T_, "T", str); ReadValue(U_, "U", str); if (!ReadValue(center, "peakposition", str)) center=0; } else n++; if (!Ed.IsSet() && Ed_[0]!=SpecNumber) for (int i=0; i<baths; i++) common::Ed[i] = Ed_[i]; if (!T.IsSet() && T_!=SpecNumber) common::T = T_; if (!U.IsSet() && U_!=SpecNumber) common::U = U_; common::beta = 1./common::T; Energy.resize(Na+Nc); minEnergy=0; // Calculates auxiliary Energies for (int i=0; i<Na+Nc; i++){ Energy[i] = 0; for (int j=0; j<baths; j++) Energy[i] += common::Ed[j]*common::Ms[i][j]; // Energy[i] += 0.5*common::Mtot[i]*(common::Mtot[i]-1)*(common::U-0.5*common::J); // Energy[i] += common::J*common::sJc[i]; Energy[i] += 0.5*common::Mtot[i]*(common::Mtot[i]-1)*common::U; Energy[i] += common::sJc[i]; if (Energy[i]<minEnergy) minEnergy = Energy[i]; } clog<<"************* Parameters ****************"<<endl; clog<<" U = "<<common::U<<endl; for (int i=0; i<baths; i++) clog<<" Ed"<<i<<" = "<<common::Ed[i]<<endl; clog<<" T = "<<common::T<<endl; for (int i=0; i<baths; i++) clog<<" N"<<i<<" = "<<common::Ns[i]<<endl; for (int i=0; i<Na+Nc; i++){ if (i<Na) clog<<" valence state"<<setw(2)<<left<<i<<right<<" = "; else clog<<" core state"<<i<<" = "; for (int j=0; j<baths; j++) clog<<setw(2)<<common::Ms[i][j]; clog<<" with Energy"<<setw(2)<<left<<i<<right<<" = "<<Energy[i]<<endl; } clog<<"*****************************************"<<endl; // Computes the number of columns in file if (!input) { cerr << "ERROR: Wrong file format for Sigm" << endl; return false; } getline(input,str); n++; #ifdef _STRSTREAM strstream oneline; oneline << str <<ends; #else istringstream oneline(str); #endif int m=0; double t; while 
(oneline){oneline>>t; m++;} m--; while (input){ getline(input,str); n++;} n--; clog << filename << ": Number of entries: "<< n <<endl; clog << filename << ": Number of columns: "<< m <<endl; clog << filename << ": Peak-position "<< center <<endl; bool CreateDefault = false; if (m<2*Na+1){ //cerr<<"ERROR: Not enough columns is input Sigma file. Exiting!"<<endl; clog<<"WARRNING: Not enough columns is input self-energy for pseudoparticles.... Creating default!"<<endl; CreateDefault = true; } inputf.seekg(0,ios::beg); // clog<<"Premaknil na "<< inputf.tellg()<<endl; if (begincomment) inputf.ignore(10000,'\n'); if (!inputf){ cerr<<"Reopening didn't suceeded!"<<endl; return false;} om.resize(n); Sigt.resize(Na,n); Sigc.resize(Na,n); int l=0; double omega; while (inputf>>omega && l<n){ om[l] = omega; if (!CreateDefault){ for (int i=0; i<Na; i++){ double Sr, St; inputf>>Sr; inputf>>St; Sigc(i,l) = dcomplex(Sr,-St); Sigt(i,l) = -St; } } getline(inputf, str); l++; } inputf.close(); if (l<n) cerr<<"Something wrong by reading file "<<filename<<endl; om.SetUp(center); mpos = om.find_(0.0)+1; m0 = om.find_(-common::SearchLorentz); m1 = om.find_(common::SearchLorentz)+1; GtA1.resize(Na,mpos); GtA2.resize(Na,om.size()-mpos); Sigcore.resize(Na,om.size()); Sigtn.resize(Na,om.size()); Gt.resize(Na,om.size()); Gp.resize(Na,om.size()); Gm.resize(Na,om.size()); fe.CalcFermOnMesh(common::beta, om); logo.CalcLogOnMesh(om); fedh.resize(om.size()); for (int i=0; i<om.size(); i++) fedh[i] = fe[i]*om.Dh(i); if (CreateDefault){ CreateSigma000(ph_omd, ph_Ac); }else{ for (int j=0; j<Na; j++){ for (int i=0; i<om.size(); i++) Sigc(j,i) = dcomplex(Sigc(j,i).real(), Sigc(j,i).imag()*(1-fe[i])); } } return true; } void Auxiliary::KramarsKronig() { for (int l=0; l<Na; l++){ for (int i=0; i<om.size(); i++) Sigc(l,i).imag() = Sigt(l,i)*(1-fe[i]); Sigc[l].KramarsKronig(om, logo); } } double Lambda(double E, const functionb<dcomplex>& Sigc, const functionb<double>& Sigx, const mesh1D& om) { // looking for 
lambda such that \widetilde{G} has maximum at zero frequency. // Sufficient condition is that the derivative of 1/\widetilde{G} is zero at zero frequency. // One gets a quadratic equation for lambda and thus two roots. Then one chooses the root that maximizes \widetilde{G}. // If no root exists, than we take lambda that minimizes linear coeficient in the expansion of 1/\widetilde{G}. // The latter equation is linear and one always gets unique solution. intpar p = om.Interp(0.0); int i=p.i; dcomplex cs = -E-Sigc(p); dcomplex ds = (Sigc[i+1]-Sigc[i])*om.Delta(i); double cr = cs.real(); double ci = cs.imag(); double dcr = 1-ds.real(); double dci = -ds.imag(); double dSigx = (Sigx[i+1]-Sigx[i])*om.Delta(i); double x = Sigx[i]/dSigx; double determinant2 = x*(x*dcr*dcr+2*ci*dci)-ci*ci; // Minimum can not be at zero. Try to find lambda that minimizes the linear coefficient in the expansion of 1/G // If 1/G = a + b omega + c omega^2 +... and the below determinant is smaller than zero, coefficient b can not be // set to zero. Than return lambda that gives the smallest b. 
if (determinant2<=0) return dcr*x-cr; double d2 = -sqrt(determinant2); double d1 = -cr + dcr*x; double v1 = 1/(sqr(ci)+sqr(cr+d1+d2)); double v2 = 1/(sqr(ci)+sqr(cr+d1-d2)); cout<<"Lambda="<<d1+d2<<" "<<d1-d2<<" "<<v1<<" "<<v2<<endl; if (fabs(v1)>fabs(v2)) return d1+d2; else return d1-d2; } double Auxiliary::Q(double lambda) { double sumQ=0; for (int j=0; j<Na; j++){ double mune = -Energy[j]+lambda; sLorentz lorentz; if (common::SubtractLorentz && j>=common::FirstLorentz && j<=common::LastLorentz){ double v0 = om[m0]+mune-Sigc(j,m0).real(), v=v0; int ii=0; for (ii=m0+1; ii<m1; ii++) { v = om[ii]+mune-Sigc(j,ii).real(); if (sign(v)*sign(v0)<0) break; } double denom = om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real(); if (denom==0) cout<<"denom="<<denom<<endl; if (sign(v)*sign(v0)<0 && denom!=0){ double zero = om[ii-1]-(om[ii]-om[ii-1])*(om[ii-1]+mune-Sigc(j,ii-1).real())/(om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real()); intpar ip(ii-1,(zero-om[ii-1])/(om[ii]-om[ii-1])); double dom = om[ii]-om[ii-1]; dcomplex Sc = Sigc[j](ip); double ratio = abs(Sc.imag()/dom); if (ratio<common::LorentzMaxRatio){ double Sm = Sigt[j](ip)*fe(ip); dcomplex dSc = (Sigc[j][ii]-Sigc[j][ii-1])/dom; //(om[ii]-om[ii-1]); double dSm = (Sigt[j][ii]*fe[ii]-Sigt[j][ii-1]*fe[ii-1])/dom; //(om[ii]-om[ii-1]); double Sc_im = Sc.imag(); if (fabs(Sc_im)<1e-20) Sc_im=-1e-20; if (fabs(Sm)<1e-20) Sm=-1e-20; if (fabs(Sc_im)>=1e-20 && fabs(Sm)>=1e-20){ lorentz.Set(zero, Sc_im, Sm, dSc.real(), dSc.imag(), dSm); //cout<<"QFound zero "<<setw(2)<<left<<j<<right<<setw(10)<<zero<<" "<<lorentz<<endl;//setw(15)<<Sc<<" "<<setw(15)<<-St<<" "; } } } } double sum=0, v; for (int i=0; i<om.size(); i++){ v = fedh[i]*Sigt(j,i)/(sqr(om[i]+mune-Sigc(j,i).real())+sqr(Sigc(j,i).imag())); if (lorentz.exist) v -= om.Dh(i)*lorentz.V(om[i]); sum -= v; } sum -= lorentz.P*M_PI; sumQ += sum*common::deg[j]; } return (sumQ/M_PI); } inline double Auxiliary::operator()(double lambda) { double Q_ = Q(lambda); return 
Q_-common::Q0; } void Auxiliary::PrintOutMeanQ(double StartLambda, double EndLambda) { double a0 = StartLambda; int M = 100; double da0 = (EndLambda-StartLambda)/M; cout.precision(16); for (int i=0; i<M; i++){ cout << a0 << setw(25) << operator()(a0) << endl; a0 += da0; } } double Auxiliary::DeterminSpectralFunctions(double StartLambda, double EndLambda, double dLambda, int followPeak) { double lambda0; if (followPeak>=0 && followPeak<Na) lambda0 = Lambda(Energy[followPeak], Sigc[followPeak], Sigt[followPeak], om); else if (followPeak==-2){ lambda0 = minEnergy; }else{ double a0 = StartLambda, b0 = 0; int sign=0, nn=0; while (!sign && nn++<100){ double pQ = operator()(a0); while (!sign && a0<=b0) { double sQ = operator()(a0+dLambda); sign = pQ*sQ<0; pQ = sQ; if (!sign) a0 += dLambda; } if (!sign) dLambda /= 2.0; } if (nn>=100) { cerr << "Can't find root for <Q>" << endl; PrintOutMeanQ(StartLambda, EndLambda); exit(1); } // loking for zero (lambda0) lambda0 = zeroin(a0, a0+dLambda, *this, 1e-15*common::Q0); } common::lambda0 = lambda0; clog << setprecision(16) << "; lambda = "<<lambda0<<" "<<lambda0-minEnergy<<endl; double sumQ = 0, sumnd=0; function1D<double> dQ(Na); for (int j=0; j<Na; j++){ double mune = -Energy[j]+lambda0; if (common::SubtractLorentz && j>=common::FirstLorentz && j<=common::LastLorentz){ double v = om[m0]+mune-Sigc(j,m0).real(), v0=v; int ii=0; for (ii=m0+1; ii<m1; ii++) { v = om[ii]+mune-Sigc(j,ii).real(); if (sign(v)*sign(v0)<0) break; } bool found = false; double denom = om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real(); if (sign(v)*sign(v0)<0 && denom!=0){ double zero = om[ii-1]-(om[ii]-om[ii-1])*(om[ii-1]+mune-Sigc(j,ii-1).real())/(om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real()); intpar ip(ii-1,(zero-om[ii-1])/(om[ii]-om[ii-1])); double dom = om[ii]-om[ii-1]; dcomplex Sc = Sigc[j](ip); double ratio = abs(Sc.imag()/dom); //clog<<"ps"<<j<<" ratio="<<ratio<<endl; if (ratio<common::LorentzMaxRatio){ double Sm = Sigt[j](ip)*fe(ip); 
dcomplex dSc = (Sigc[j][ii]-Sigc[j][ii-1])/dom; double dSm = (Sigt[j][ii]*fe[ii]-Sigt[j][ii-1]*fe[ii-1])/dom; double Sc_im = Sc.imag(); if (fabs(Sc_im)<1e-20) Sc_im=-1e-20; if (fabs(Sm)<1e-20) Sm=-1e-20; if (fabs(Sc_im)>=1e-20 && fabs(Sm)>=1e-20){ found = true; lorentzm[j].Set(zero, Sc_im, Sm, dSc.real(), dSc.imag(), dSm); lorentzp[j].Set(zero, Sc_im, Sc_im, dSc.real(), dSc.imag(), dSc.imag()); //cout<<"Sc.im="<<Sc.imag()<<" Sm="<<Sm<<" dSc.r="<<dSc.real()<<" dSc.i="<<dSc.imag()<<" dSm="<<dSm<<endl; //cout<<"zero="<<zero<<" ratio="<<ratio<<" Sm="<<Sm<<" dSm="<<dSm<<" Sc_im="<<Sc_im<<endl; cout<<"Found lorentz at "<<setw(4)<<left<<j<<right<<setw(10)<<zero<<" lm="<<lorentzm[j]<<" lp="<<lorentzp[j]<<" r-"<<setw(15)<<ratio<<endl; } } } if (!found){ lorentzp[j].SetFalse(); lorentzm[j].SetFalse(); } } } // // We want to make sure that only one integer occupacition is treated with lorentz // // because we did not yet implement Lorentz*Lorentz // int MaxMtot=0; // for (int i=0; i<Na; i++) if (MaxMtot<common::Mtot[i]) MaxMtot = common::Mtot[i]; // function1D<int> lorex(MaxMtot+1);lorex=0; // for (int j=0; j<Na; j++) if (lorentzm[j].exist ||lorentzp[j].exist) lorex[common::Mtot[j]]++; // int imaxLorentz=0; // for (int i=0; i<=MaxMtot; i++) if (lorex[i]>lorex[imaxLorentz]) imaxLorentz=i; // for (int i=0; i<Na; i++){ // if (lorentzm[i].exist && common::Mtot[i]!=imaxLorentz) { cout<<"Lorentzm for "<<i<<" not accepted!"<<endl; lorentzm[i].SetFalse();} // if (lorentzp[i].exist && common::Mtot[i]!=imaxLorentz) { cout<<"Lorentzp for "<<i<<" not accepted!"<<endl; lorentzp[i].SetFalse();} // } for (int j=0; j<Na; j++){ double mune = -Energy[j]+lambda0; dQ[j]=0; for (int i=0; i<om.size(); i++){ Gt(j,i) = Sigt(j,i)/(sqr(om[i]+mune-Sigc(j,i).real())+sqr(Sigc(j,i).imag())); Gm(j,i) = fe[i]*Gt(j,i); Gp(j,i) = (1-fe[i])*Gt(j,i); if (lorentzm[j].exist) Gm(j,i) -= lorentzm[j].V(om[i]); if (lorentzp[j].exist) Gp(j,i) -= lorentzp[j].V(om[i]); dQ[j] -= Gm(j,i)*om.Dh(i); } dQ[j] -= 
lorentzm[j].P*M_PI; dQ[j] *= common::deg[j]/M_PI; sumQ += dQ[j]; sumnd += dQ[j]*common::Mtot[j]; } clog<<" Q = "<<sumQ<<endl; for (int j=0; j<Na; j++){ Probability[j] = dQ[j]/sumQ; clog<<setprecision(16)<<" n"<<j<<"="<<dQ[j]/sumQ<<endl; } for (int b=0; b<baths; b++){ common::nalpha[b]=0; for (int j=0; j<Na; j++) common::nalpha[b] += dQ[j]*common::Ms[j][b]; common::nalpha[b]/=sumQ; } common::Q = sumQ; common::Fimp = common::lambda0-common::T * ::log(common::Q); double Epot=0; for (int j=0; j<Na; j++) Epot += Probability[j]*Energy[j]; double dEpot=0; for (int b=0; b<baths; b++) dEpot += common::Ed[b]*common::nalpha[b]; common::Epot = Epot-dEpot; clog<<" Fimp="<<common::Fimp<<" Epot="<<common::Epot<<" Epot+OneP="<<Epot<<endl; // if (fabs(sumQ-common::Q0)>1e-10) cerr<<"Something wrong with Q "<<sumQ<<"!"<<endl; clog<<" Q is here equal to "<<sumQ<<endl; return sumnd/sumQ; } void Auxiliary::Print(int l, string dir="") { string filename; if (l<0) filename = common::outdir+"/Sigma"+dir; else filename = NameOfFile(common::outdir+"/Sigma", l); ofstream out1(filename.c_str()); out1.precision(16); common::printHead(out1)<<" peakposition="<<om.dcenter()<<endl; for (int i=0; i<om.size(); i++){ out1<<setw(25)<<om[i]; for (int j=0; j<Na; j++) out1<<setw(25)<<Sigc(j,i).real()<<" "<<setw(25)<<-Sigt(j,i); out1<<endl; } if (l<0) filename = common::outdir+"/Spec"+dir; else filename = NameOfFile(common::outdir+dir+"/Spec", l); ofstream out2(filename.c_str()); out2.precision(16); common::printHead(out2)<<" peakposition="<<om.dcenter()<<endl; for (int i=0; i<om.size(); i++){ out2<<setw(25)<<om[i]; for (int j=0; j<Na; j++) out2<<setw(25)<<-Gt(j,i); for (int j=0; j<Na; j++) out2<<setw(25)<<-Gp(j,i); for (int j=0; j<Na; j++) out2<<setw(25)<<-Gm(j,i); out2<<endl; } } void Auxiliary::Printn(int l) { string filename; filename = NameOfFile(common::outdir+"/nSigma", l); ofstream out1(filename.c_str()); out1.precision(16); common::printHead(out1)<<" peakposition="<<om.dcenter()<<endl; for (int 
i=0; i<om.size(); i++){ out1<<setw(25)<<om[i]; for (int j=0; j<Na; j++) out1<<setw(25)<<-Sigtn(j,i); out1<<endl; } } Physical::Physical(int Na_, int Nc_, int baths_) : Na(Na_), Nc(Nc_), baths(baths_), aF(Na) { Pexists.resize(Na); for (int j=0; j<Na; j++){ Pexists[j]=false; for (int b=0; b<baths; b++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ if (l->first >=0 && l->first < Na){ Pexists[j]=true; break; } } } if (!Pexists[j] && common::cmp_susc){ for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++) if (l->first >=0 && l->first < Na){ Pexists[j]=true; break; } } } } bool Physical::ReadBeginning(const string& filename, istream& input, int& n, int& m, bool& begincomment, double& center) { if (!input) { cerr << "Can't open input file: " << filename << endl; return false; } // Is the input file started with comment? begincomment = false; n = 0; string str; getline(input,str); if (str.find('#')<string::npos){ begincomment = true; if (!ReadValue(center, "peakposition", str)) center=0; } else n++; // Computes the number of columns in file if (!input) { cerr << "ERROR: Wrong file format for Sigm" << endl; return false; } getline(input,str); n++; stringstream oneline; oneline << str << ends; m=0; double t; while (oneline){oneline>>t; m++;} m--; while (input){ getline(input,str); n++;} n--; clog << filename << ": Number of entries: "<< n <<endl; clog << filename << ": Number of columns: "<< m <<endl; clog << filename << ": Peak-position "<< center <<endl; input.seekg(0, ios::beg); input.clear(); if (begincomment) getline(input, str); return true; } bool Physical::ReadBathFunction(const string& filename, bool spectra=true) // spectra=true: only spectral function will be read not the retarded quantity { ifstream inputf(filename.c_str()); istream input(inputf.rdbuf()); input.seekg(0,ios::beg); if (!input) { cerr << "Can't open input file: " << filename << endl; return false; } 
// Is the input file started with comment? bool begincomment = false; int n = 0; string str; double center=0; getline(input,str); if (str.find('#')<string::npos){ begincomment = true; if (!ReadValue(center, "peakposition", str)) center=0; } else n++; // Computes the number of columns in file if (!input) { cerr << "ERROR: Wrong file format for " << filename << endl; return false; } getline(input,str); n++; #ifdef _STRSTREAM strstream oneline; oneline << str <<ends; #else istringstream oneline(str); #endif int m=0; double t; while (oneline){oneline>>t; m++;} m--; while (input){ getline(input,str); n++;} n--; clog << filename << ": Number of entries: "<< n <<endl; clog << filename << ": Number of columns: "<< m <<endl; clog << filename << ": Peak-position "<< center <<endl; int number_cols = baths+1; if (!spectra) number_cols = 2*baths+1; if (m<number_cols){ cerr<<"ERROR: Not enough columns in bath input file! Exiting..."<<endl; return false; } inputf.seekg(0, ios::beg); clog<<"Premaknil na "<< inputf.tellg()<<endl; if (begincomment) inputf.ignore(1000,'\n'); if (!inputf){ cerr<<"Reopening didn't suceeded!"<<endl; return false;} omd.resize(n); momd.resize(n); G00.resize(baths,n); A00.resize(baths,n); A00c.resize(baths,n); Sig.resize(baths,n); Ac.resize(baths,n); Delta0.resize(baths,n); if (common::cmp_susc){ C00.resize(n); Chi.resize(n); } int l=0; double omega; while (inputf>>omega && l<n){ omd[l] = omega; if (spectra) for (int j=0; j<baths; j++) inputf>>Ac(j,l); else{ for (int j=0; j<baths; j++) { double dr, di; inputf>>dr; inputf>>di; Ac(j,l) = -di/M_PI; Delta0(j,l) = dcomplex(dr,di); } } getline(inputf, str); momd[n-l-1] = -omd[l]; l++; } inputf.close(); if (l<n) cerr<<"Something wrong by reading file "<<filename<<endl; omd.SetUp(center); momd.SetUp(-center); fed.CalcFermOnMesh(common::beta, omd); th.CalcTanhOnMesh(common::beta, omd); logod.CalcLogOnMesh(omd); if (spectra){ for (int b=0; b<baths; b++){ for (int i=0; i<omd.size(); i++){ double Deltar = 
::KramarsKronig(Ac[b], omd, omd[i], i, Ac[b][i]); Delta0(b,i) = dcomplex(-M_PI*Deltar,-M_PI*Ac[b][i]); } } } return true; } void Physical::CalculateProducts(double u, double fu, const mesh1D& om, const function2D<double>& Gm) { apar ap; cintpar pi; tint position = om.InitInterpLeft(); InterpLeft(om[0]-u, om, position, pi); #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].InterpolateFirst(pi); InterpLeft(om[1]-u, om, position, pi); ap.SetUpCsFirst(u, om); #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,0) = aF[i].InterpolateNext(pi, ap) * om.Dh(0); for (int j=1; j<om.size()-1; j++){ InterpLeft(om[j+1]-u, om, position, pi); ap.SetUpCs(u, j, om, om.Dh(pi.i+1)); //#pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,j) = aF[i].InterpolateNext(pi, ap) * om.Dh(j); } ap.SetUpCsLast(u, om); #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,om.size()-1) = aF[i].InterpolateLast(ap) * om.Dh(om.size()-1); Cmp.resize(Na,Na); #pragma omp parallel for for (int i=0; i<Na; i++){ for (int b=0; b<baths; b++){ for (map<int,double>::const_iterator l=common::sncab[i][b].begin(); l!=common::sncab[i][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na) Cmp(i,ind) = product(Gtx[i].MemPt(),Gm[ind].MemPt(),om.size())/fu; } } if (common::cmp_susc){ for (map<int,double>::const_iterator l=common::suscb[i].begin(); l!=common::suscb[i].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na) Cmp(i,ind) = product(Gtx[i].MemPt(),Gm[ind].MemPt(),om.size())/fu; } } } } void Physical::CalculateA00(const mesh1D& omega, const function2D<double>& Gp, const function2D<double>& Gm, const function1D<double>& Energy, const vector<sLorentz>& lorentzm, const vector<sLorentz>& lorentzp) { int m = omd.find_(0.0)+1; Gtx.resize(Na, omega.size()); #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].SetUp(Gp[i],omega); for (int i=0; i<m; i++){ CalculateProducts(omd[i], fed[i], omega, Gm); #pragma omp 
parallel for for (int b=0; b<baths; b++){ double sum=0; for (int j=0; j<Na; j++) for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; double prf = l->second/common::Ns[b]; if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind); } A00(b,i) = sum/(M_PI*M_PI*common::Q); } if (common::cmp_susc){ double sum=0; for (int j=0; j<Na; j++) for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++){ int ind = l->first; double prf = l->second; if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind); } C00[i] = sum*th[i]/(M_PI*common::Q); } } #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].SetUp(Gm[i],omega); for (int i=m; i<omd.size(); i++){ CalculateProducts(omd[i], (1-fed[i]), omega, Gp); #pragma omp parallel for for (int b=0; b<baths; b++){ double sum=0; for (int j=0; j<Na; j++) for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; double prf = l->second/common::Ns[b]; if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind); } A00(b,i) = sum/(M_PI*M_PI*common::Q); } if (common::cmp_susc){ double sum=0; for (int j=0; j<Na; j++) for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++){ int ind = l->first; double prf = l->second; if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind); } C00[i] = sum*th[i]/(M_PI*common::Q); } } if (common::SubtractLorentz){ for (int b=0; b<baths; b++){ //cout<<"Starting parallel part"<<endl; double* A00_private = new double[omd.size()]; for (int s=0; s<omd.size(); s++) A00_private[s]=0.0; for (int i=0; i<Na; i++){ for (map<int,double>::const_iterator l=common::sncab[i][b].begin(); l!=common::sncab[i][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double prf = (l->second/common::Ns[b])/(M_PI*M_PI)/common::Q; if (lorentzm[ind].exist){ #pragma omp parallel for for (int j=0; j<m; j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += 
lorentzm[ind].IntgAp(omega[k], omega[k+1], Gp(i,k), Gp(i,k+1), omd[j]); //A00(b,j) += sum*prf/fed[j]; A00_private[j] += sum*prf/fed[j]; } } if (lorentzp[ind].exist){ #pragma omp parallel for for (int j=m; j<omd.size(); j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzp[ind].IntgAp(omega[k], omega[k+1], Gm(i,k), Gm(i,k+1), omd[j]); //A00(b,j) += sum*prf/(1-fed[j]); A00_private[j] += sum*prf/(1-fed[j]); } } if (lorentzp[i].exist){ #pragma omp parallel for for (int j=0; j<m; j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzp[i].IntgAp(omega[k], omega[k+1], Gm(ind,k), Gm(ind,k+1), -omd[j]); //A00(b,j) += sum*prf/fed[j]; A00_private[j] += sum*prf/fed[j]; } } if (lorentzm[i].exist){ #pragma omp parallel for for (int j=m; j<omd.size(); j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzm[i].IntgAp(omega[k], omega[k+1], Gp(ind,k), Gp(ind,k+1), -omd[j]); //A00(b,j) += sum*prf/(1-fed[j]); A00_private[j] += sum*prf/(1-fed[j]); } } if (lorentzm[ind].exist && lorentzp[i].exist) #pragma omp parallel for for (int j=0; j<m; j++){ //A00(b,j) += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf/fed[j]; A00_private[j] += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf/fed[j]; } if (lorentzp[ind].exist && lorentzm[i].exist) #pragma omp parallel for for (int j=m; j<omd.size(); j++){ //A00(b,j) += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf/(1-fed[j]); A00_private[j] += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf/(1-fed[j]); } } } } for (int s=0; s<omd.size(); s++) A00(b,s) += A00_private[s]; delete[] A00_private; //cout<<"Just ended parallel part"<<endl; } if (common::cmp_susc){ for (int i=0; i<Na; i++){ for (map<int,double>::const_iterator l=common::suscb[i].begin(); l!=common::suscb[i].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double prf = (l->second)/(M_PI*common::Q); if (lorentzm[ind].exist){ for (int j=0; j<m; j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += 
lorentzm[ind].IntgAp(omega[k], omega[k+1], Gp(i,k), Gp(i,k+1), omd[j]); C00[j] += sum*prf*th[j]/fed[j]; } } if (lorentzp[ind].exist){ for (int j=m; j<omd.size(); j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzp[ind].IntgAp(omega[k], omega[k+1], Gm(i,k), Gm(i,k+1), omd[j]); C00[j] += sum*prf*th[j]/(1-fed[j]); } } if (lorentzp[i].exist){ for (int j=0; j<m; j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzp[i].IntgAp(omega[k], omega[k+1], Gm(ind,k), Gm(ind,k+1), -omd[j]); C00[j] += sum*prf*th[j]/fed[j]; } } if (lorentzm[i].exist){ for (int j=m; j<omd.size(); j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzm[i].IntgAp(omega[k], omega[k+1], Gp(ind,k), Gp(ind,k+1), -omd[j]); C00[j] += sum*prf*th[j]/(1-fed[j]); } } if (lorentzm[ind].exist && lorentzp[i].exist) for (int j=0; j<m; j++) C00[j] += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf * th[j]/fed[j]; if (lorentzp[ind].exist && lorentzm[i].exist) for (int j=m; j<omd.size(); j++) C00[j] += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf * th[j]/(1-fed[j]); } } } } } if (common::pcore){ // core stuff for (int b=0; b<baths; b++){ for (int i=0; i<omd.size(); i++){ double sum1=0; for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ if (l->first >= Na){ int ind = l->first; double x = Energy[ind]-common::lambda0-omd[i]; double prf = l->second/common::Ns[b]; sum1 -= prf*Gm[j](omega.Interp(x))/common::Q/M_PI; } } } double sum2=0; for (int j=Na; j<Na+Nc; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ if (l->first >= 0 && l->first<Na){ int ind = l->first; double x = Energy[j]-common::lambda0+omd[i]; double prf = l->second/common::Ns[b]; sum2 -= prf*Gm[ind](omega.Interp(x))/common::Q/M_PI; } } } A00c(b,i) = sum1+sum2; } } // Checking doping! 
for (int b=0; b<baths; b++){ double suma = 0, sumc = 0; for (int i=0; i<omd.size(); i++) { suma += A00(b,i)*fed[i]*omd.Dh(i); sumc += A00c(b,i)*fed[i]*omd.Dh(i); } suma *= common::Ns[b]; sumc *= common::Ns[b]; double miss_nd = common::nalpha[b]-(suma+sumc); double core_fact = 1.; if (sumc!=0 && common::renorm_core){ core_fact = (common::nalpha[b]-suma)/sumc; if (core_fact<0) core_fact=0; if (core_fact>10) core_fact = 10; cout<<b<<" : "<<miss_nd<<" renormaliziang core part by "<<core_fact<<endl; } for (int i=0; i<omd.size(); i++) A00(b,i) += A00c(b,i)*core_fact; if (common::renorm){ double suml=0, sumr=0; for (int i=0; i<omd.size(); i++){ suml += A00(b,i)*fed[i]*omd.Dh(i); sumr += A00(b,i)*(1-fed[i])*omd.Dh(i); } int izero = omd.find_(0.0); double ml1=0, mr1=0; for (int i=0; i<izero; i++) { ml1 += omd[i]*A00(b,i)*fed[i]*omd.Dh(i); mr1 += omd[i]*A00(b,i)*(1-fed[i])*omd.Dh(i); } double ml2=0, mr2=0; for (int i=izero+1; i<omd.size(); i++) { ml2 += omd[i]*A00(b,i)*fed[i]*omd.Dh(i); mr2 += omd[i]*A00(b,i)*(1-fed[i])*omd.Dh(i); } double n0 = common::nalpha[b]/common::Ns[b]; double C = (-ml2 + ml2*n0 + mr2*n0 - mr2*suml + ml2*sumr)/(ml1*mr2-ml2*mr1); double D = (ml1 - ml1*n0 - mr1*n0 + mr1*suml - ml1*sumr)/(ml1*mr2-ml2*mr1); if (1+C*omd[0]<0) C = -1/omd[0]; if (1+D*omd.last()<0) D = -1/omd.last(); for (int i=0; i<izero; i++) A00(b,i) *= (1+C*omd[i]); for (int i=izero+1; i<omd.size(); i++) A00(b,i) *= (1+D*omd[i]); cout<<"Renormalizing A["<<b<<"] by "<<C<<", "<<D<<"at negative and positive frequency"<<endl; } } } // ofstream out("Aloc.imp"); out.precision(16); // for (int i=0; i<omd.size(); i++){ // out<<setw(25)<<omd[i]<<" "; // for (int b=0; b<baths; b++) out<<setw(25)<<A00(b,i)<<" "; // out<<endl; // } } inline void Physical::KramarsKronig() { for (int b=0; b<baths; b++) G00[b].KramarsKronig(omd,logod); } void Physical::CalcSelfEnergy() { for (int b=0; b<baths; b++){ for (int i=0; i<omd.size(); i++){ //double Deltar = ::KramarsKronig(Ac[b], omd, omd[i], i, Ac[b][i]); 
//dcomplex Delta(-M_PI*Deltar,-M_PI*Ac[b][i]); Sig[b][i] = omd[i]-common::Ed[b]-Delta0[b][i]-1/G00[b][i]; if (Sig[b][i].imag()>0) Sig[b][i].imag()=0.0; } } if (common::cmp_susc){ for (int i=0; i<omd.size(); i++) Chi[i] = dcomplex(::KramarsKronig(C00, omd, omd[i], i, C00[i]),C00[i]); } } void Physical::Print(int n, string dir="") { string filename; if (n<0) filename = common::outdir+"/A00"+dir; else filename = common::outdir+NameOfFile("/A00",n,3); ofstream out(filename.c_str()); out.precision(16); common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl; for (int i=0; i<omd.size(); i++){ out <<setw(25)<<omd[i]; for (int b=0; b<baths; b++) out<<setw(25)<<A00[b][i]<<setw(25)<<G00[b][i]<<setw(25)<<-Sig[b][i]; out<<endl; } if (n<0) filename = common::outdir+"/Susc"+dir; else filename = common::outdir+NameOfFile("/Susc",n,3); ofstream outs(filename.c_str()); outs.precision(16); common::printHead(outs)<<" peakposition=" << omd.dcenter()<<endl; for (int i=0; i<omd.size(); i++) outs <<setw(25)<<omd[i]<<setw(25)<<Chi[i]<<endl; } void Physical::Print0(const string& filename) { ofstream out(filename.c_str()); out.precision(16); common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl; for (int i=0; i<omd.size(); i++){ out <<setw(25)<<omd[i]; for (int b=0; b<baths; b++) out<<setw(25)<<A00[b][i]; for (int b=0; b<baths; b++) out<<setw(25)<<G00[b][i]; for (int b=0; b<baths; b++) out<<setw(25)<<-Sig[b][i]; out<<endl; } } double Auxiliary::DeterminSelfEnergies(double alpha,int CmpDiff){ double beta=1-alpha; Sigtmp.resize(om.size()); if (CmpDiff<0) CmpDiff = Na; double diff=0, norm=0; for (int j=0; j<Na; j++){ for (int i=0; i<om.size(); i++) if (Sigtn(j,i)>0) Sigtn(j,i)=0; for (int i=0; i<om.size(); i++) Sigtmp[i].imag() = Sigtn(j,i)*(1-fe[i]); Sigtmp.KramarsKronig(om, logo); for (int i=0; i<om.size(); i++){ dcomplex Sigcn = Sigtmp[i] + Sigcore(j,i); Sigtn(j,i) += Sigcore(j,i).imag(); if (j<CmpDiff){ diff += fabs(Sigtn(j,i)-Sigt(j,i)); norm += fabs(Sigt(j,i)); } 
Sigt(j,i) = beta*Sigt(j,i)+alpha*Sigtn(j,i); Sigc(j,i) = beta*Sigc(j,i)+alpha*Sigcn; } } return diff/norm; } void Physical::DeterminG00(double alpha,ostream& loging) { double beta=1-alpha; double alphapi=-alpha*M_PI; for (int b=0; b<baths; b++){ for (int j=0; j<omd.size(); j++) G00[b][j].imag()=beta*G00[b][j].imag()+alphapi*A00[b][j]; G00[b].KramarsKronig(omd,logod); } common::TrLogGimp=0.0; for (int b=0; b<baths; b++){ double Ndf=0.0; double dsum=0; for (int j=0; j<omd.size(); j++){ dsum += -log(-G00[b][j]).imag()*fed[j]*omd.Dh(j)/M_PI; Ndf += -G00[b][j].imag()*fed[j]*omd.Dh(j)/M_PI; } common::TrLogGimp += dsum*common::Ns[b]; Ndf *= common::Ns[b]; loging<<"Expected density:"<<common::nalpha[b]<<" numerical density:"<<Ndf<<endl; } loging<<"TrLogGimp="<<common::TrLogGimp<<endl; } void Auxiliary::PrintNorm(ostream& stream) { stream<<" Norm of Spectral functions: "<<endl<<" "; stream.setf(ios::fixed); for (int i=0; i<Na; i++){ double sum=0; for (int j=0; j<om.size(); j++) sum += Gp(i,j)*om.Dh(j); sum += lorentzp[i].P*M_PI; sum/=-M_PI; double norm0=1; stream<<setprecision(4)<<" "; if (fabs(sum-norm0)<1e-2) stream<<COLOR(GREEN,setw(2)<<i<<":"<<setw(8)<<sum)<<" "; else if (fabs(sum-norm0)<1e-1) stream<<COLOR(YELLOW,setw(2)<<i<<":"<<setw(8)<<sum)<<" "; else stream<<COLOR(PURPLE,setw(2)<<i<<":"<<setw(8)<<sum)<<" "; if ((i+1)%6==0) stream<<endl<<" "; } stream<<endl; for (int b=0; b<baths; b++){ stream<<setprecision(4)<<" "<<COLOR(BLUE,setw(2)<<b<<":"<<setw(8)<<common::nalpha[b])<<" "; } stream<<endl; stream.unsetf(ios::fixed); } void Physical::PrintA00(ostream& out) { out.precision(16); common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl; for (int i=0; i<omd.size(); i++){ out<<setw(25)<<omd[i]; for (int b=0; b<baths; b++) out<<setw(25)<<A00[i]; out<<endl; } } double Auxiliary::Difference(){ double diff=0, norm=0; for (int j=0; j<Na; j++){ for (int i=0; i<om.size(); i++){ diff += fabs(Sigtn(j,i)-Sigt(j,i)); norm += 0.5*fabs(Sigtn(j,i)+Sigtn(j,i)); } } return 
diff/norm;
}

/******************* Used only for debugging **********************/

// Dumps -Sigtn for each pseudoparticle to files "Sign00", "Sign01", ...
void Auxiliary::PrintSign()
{
  for (int i=0; i<Na; i++){
    ofstream out(NameOfFile("Sign",i,2).c_str());
    out.precision(16);
    for (int j=0; j<om.size(); j++) out<<setw(25)<<om[j]<<setw(25)<<-Sigtn[i][j]<<endl;
  }
}

// Dumps the convolution buffers aAc[0] (divided by the mesh weights) for
// diagnostic inspection, one file per row.
void Auxiliary::Print_aAc(int l)
{
  for (int i=0; i<aAc[0].size_N(); i++){
    ofstream out(NameOfFile_("aAc",l,i,1,3).c_str());
    out.precision(16);
    for (int j=0; j<aAc[0].size_Nd(); j++){
      out<<setw(25)<<om[j]<<setw(25)<<aAc[0][i][j]/om.Dh(j)<<endl;
    }
  }
}

/******************* New things ******************************/

// Parses the diagram/cix input file: header (number of baths, their
// degeneracies Ns, Na valence and Nc core states), then one line per
// pseudoparticle with its quantum numbers and the NCA diagrams sncab/sncaf
// (each entry "factor x index"), and finally an optional susceptibility
// diagram section (sets cmp_susc when present and complete).
// Exits the process on a malformed header or state line.
void common::ParsInputFile(const string& filename)
{
  ifstream input(filename.c_str());
  string line;
  getline(input,line); // comment/title line
  input>>baths;
  Ns.resize(baths);
  for (int i=0; i<baths; i++) input>>Ns[i];
  input>>Na;
  input>>Nc;
  getline(input,line);
  getline(input,line);
  if (!input){ cerr<<filename<<" file not recognized. Error in first 3 lines!"<<endl; exit(1);}
  // allocate all per-state tables now that Na, Nc and baths are known
  deg.resize(Na+Nc);
  Ms.resize(Na+Nc,baths);
  Mtot.resize(Na+Nc);
  sJc.resize(Na+Nc);
  ncab.resize(Na+Nc, baths);
  ncaf.resize(Na+Nc, baths);
  prefactb.resize(Na+Nc, baths);
  prefactf.resize(Na+Nc, baths);
  prefactG.resize(Na+Nc, baths);
  sncab.resize(Na+Nc);
  sncaf.resize(Na+Nc);
  for (int i=0; i<Na+Nc; i++) sncab[i].resize(baths);
  for (int i=0; i<Na+Nc; i++) sncaf[i].resize(baths);
  vector<int> Nncab(baths), Nncaf(baths);
  for (int i=0; i<Na+Nc; i++){
    getline(input, line);
    if (!input){ cerr<<filename<<" file not recognized. Error in line number "<<i+3<<endl; exit(1);}
    stringstream thisline(line);
    int lc;
    thisline>>lc; // leading state index, discarded
    for (int j=0; j<baths; j++) thisline>>Ms[i][j];
    thisline>>Mtot[i]>>deg[i]>>sJc[i];
    // per-bath diagram counts, then the "factor x index" entries themselves
    for (int j=0; j<baths; j++) thisline>>Nncab[j];
    for (int j=0; j<baths; j++) thisline>>Nncaf[j];
    string cross;
    double fct;
    int ind;
    for (int j=0; j<baths; j++){
      for (int k=0; k<Nncab[j]; k++){ thisline>>fct>>cross>>ind; sncab[i][j][ind]=fct; }
    }
    for (int j=0; j<baths; j++){
      for (int k=0; k<Nncaf[j]; k++){ thisline>>fct>>cross>>ind; sncaf[i][j][ind]=fct; }
    }
    if (!input){ cerr<<filename<<" file not recognized. Error in line number "<<i+3<<endl; exit(1);}
  }
  getline(input, line);// comment
  // optional susceptibility-diagram section; any read failure leaves
  // cmp_susc false (bail out through exit_loop)
  cmp_susc = false;
  if (input){
    suscb.resize(Na);
    for (int i=0; i<Na; i++){
      getline(input, line);
      if (!input) goto exit_loop;
      stringstream thisline(line);
      int lc;
      thisline>>lc;
      int ndiagram;
      thisline>>ndiagram;
      string cross;
      double fct;
      int ind;
      for (int j=0; j<ndiagram; j++){ thisline>>fct>>cross>>ind; suscb[i][ind]=fct; }
    }
    cmp_susc = true;
  }
 exit_loop:
  PrintParsedData(cout);
  totDeg = 0;
  for (int i=0; i<Na; i++) totDeg += deg[i];
}

// Echoes everything ParsInputFile read, in roughly the input-file layout,
// for visual verification.
void common::PrintParsedData(ostream& stream)
{
  stream<<baths<<" ";
  for (int i=0; i<baths; i++) stream<<Ns[i]<<" ";
  stream<<Na<<" "<<Nc<<endl;
  for (int i=0; i<Na+Nc; i++){
    stream<<setw(3)<<i<<" ";
    if (i<Na) stream<<"v "; else stream<<"c "; // valence vs core state
    for (int j=0; j<baths; j++) stream<<setw(10)<<Ms[i][j];
    stream<<setw(4)<<Mtot[i]<<setw(5)<<deg[i]<<setw(6)<<sJc[i];
    for (int b=0; b<baths; b++) stream<<setw(2)<<sncab[i][b].size()<<" ";
    for (int b=0; b<baths; b++) stream<<setw(2)<<sncaf[i][b].size()<<" ";
    for (int b=0; b<baths; b++)
      for (map<int,double>::const_iterator l=sncab[i][b].begin(); l!=sncab[i][b].end(); l++)
        stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right;
    for (int b=0; b<baths; b++)
      for (map<int,double>::const_iterator l=sncaf[i][b].begin(); l!=sncaf[i][b].end(); l++)
        stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right;
    stream<<endl;
  }
  if (!cmp_susc)
return; stream<<"Susceptibility digrams:"<<endl; for (int i=0; i<Na; i++){ stream<<setw(3)<<i<<" "; for (map<int,double>::const_iterator l=suscb[i].begin(); l!=suscb[i].end(); l++) stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right; stream<<endl; } } void print(std::ostream& stream, const mesh1D& om, const function2D<dcomplex>& f, int width=20) { if (om.size()!=f.size_Nd()) std::cerr<<"Can't print objectc of different size!"<<std::endl; for (int i=0; i<om.size(); i++){ stream <<std::setw(width)<<om[i]; for (int j=0; j<f.size_N(); j++) stream<<std::setw(width)<<f(j,i); stream<<std::endl; } } void Physical::MissingDoping(double start) { cout<<"Missing doping : "; for (int b=0; b<baths; b++){ double sum = 0; for (int i=0; i<omd.size(); i++) { if (omd[i]>start) sum += G00[b][i].imag()*fed[i]*omd.Dh(i); } sum *= -common::Ns[b]/M_PI; common::miss_nd[b] = common::nalpha[b]-sum; cout<<b<<" : "<<common::miss_nd[b]<<" "; } cout<<endl; common::Sinfty.resize(baths); for (int b=0; b<baths; b++){ double sum0 = 0, sum1 = 0; for (int i=0; i<omd.size(); i++) { sum0 += A00(b,i)*omd.Dh(i); sum1 += A00(b,i)*omd[i]*omd.Dh(i); } common::moment[b][0] = sum0; common::moment[b][1] = sum1; common::Sinfty[b] = sum1/sum0-common::Ed[b]; } } void Auxiliary::PrintCore(const string& filename) { ofstream out(filename.c_str()); for (int i=0; i<om.size(); i++){ out<<setw(20)<<om[i]<<" "; for (int j=0; j<Na; j++){ out<<setw(20)<<Sigcore[j][i]<<" "; } out<<endl; } } #endif
wino_conv_kernel_mips.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, Martin Han
 * Author: hansh-sz@hotmail.com
 */

#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h> /* memcpy: used by the pad/unpad helpers; was relied on implicitly */

#include "wino_conv_kernel_mips.h"

#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))

#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ?
(a) : (b))

/* Clamp activations in place: standard ReLU (max with 0), and additionally an
 * upper bound when 'activation' is positive (e.g. activation==6 gives ReLU6). */
static void relu(float* data, int size, int activation)
{
    for (int i = 0; i < size; i++)
    {
        data[i] = WINO_MAX(data[i], ( float )0);

        if (activation > 0)
        {
            data[i] = WINO_MIN(data[i], ( float )activation);
        }
    }
}

/* Bytes of scratch needed for the Winograd-transformed kernel
 * (outch * inch * 6x6 tiles) plus 128 bytes of slack.
 * NOTE(review): 'param' is unused; the size is computed in a plain int and
 * could overflow for very large layers -- confirm callers' ranges. */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int output_c = filter->dims[0];
    int input_c = filter->dims[1];
    int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128;    // caution
}

/* Copy an m x n 2D plane into an m_align x n_align destination at row/col
 * offset (pad_h, pad_w).  Border cells are NOT written here -- presumably the
 * caller pre-zeroes dst (TODO confirm).  The fast path (no enlargement needed)
 * copies verbatim and ignores the pad offsets, which assumes pads are 0 in
 * that case. */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float));
    }
}

// pad 0 in right and down side on 3D: apply pad_0_align_2D channel by channel
static void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w);
    }
}

/* Inverse of pad_0_align_2D: crop the m x n interior at offset (pad_h, pad_w)
 * out of an m_align x n_align padded plane. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float));
    }
}

// crop the padding away channel by channel on 3D (inverse of pad_0_align_3D)
static void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
    }
}

void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block, float*
transform_input, float* output_bordered, float* _bias, int w, int h, int inch, int outw, int outh, int outch, int num_thread) { size_t elemsize = sizeof(float); const float* bias = _bias; // pad to 4n+2, winograd F(4,3) float* bottom_blob_bordered = bottom_blob; int outw_align = (outw + 3) / 4 * 4; int outh_align = (outh + 3) / 4 * 4; w = outw_align + 2; h = outh_align + 2; // BEGIN transform input float* bottom_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 4 * inch * tiles; bottom_blob_tm = transform_input; // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < inch; q++) { const float* img = bottom_blob_bordered + q * w * h; for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; const float* r4 = r3 + w; const float* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q; float* out_tm1 = out_tm0 + tiles_n; float* out_tm2 = out_tm0 + 2 * tiles_n; float* out_tm3 = out_tm0 + 3 * tiles_n; float* out_tm4 = 
out_tm0 + 4 * tiles_n; float* out_tm5 = out_tm0 + 5 * tiles_n; float* out_tm6 = out_tm0 + 6 * tiles_n; float* out_tm7 = out_tm0 + 7 * tiles_n; float* out_tm8 = out_tm0 + 8 * tiles_n; float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; 
out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } // BEGIN dot float* top_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 36 * tiles; top_blob_tm = dot_block; #pragma omp parallel for num_threads(num_thread) for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp << 3; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); float* output4_tm = top_blob_tm + tiles_n * (p + 4); float* output5_tm = top_blob_tm + tiles_n * (p + 5); float* output6_tm = top_blob_tm + tiles_n * (p + 6); float* output7_tm = top_blob_tm + tiles_n * (p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __mips_msa v4f32 _sum0 = {0.f}; v4f32 _sum1 = {0.f}; v4f32 _sum2 = {0.f}; v4f32 _sum3 = {0.f}; v4f32 _sum4 = {0.f}; v4f32 _sum5 = {0.f}; 
v4f32 _sum6 = {0.f}; v4f32 _sum7 = {0.f}; int q = 0; for (; q + 3 < inch; q = q + 4) { v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 8, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 12, 0); v4f32 _k0 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k1 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k2 = (v4f32)__msa_ld_w(kptr + 8, 0); v4f32 _k3 = (v4f32)__msa_ld_w(kptr + 12, 0); v4f32 _k4 = (v4f32)__msa_ld_w(kptr + 16, 0); v4f32 _k5 = (v4f32)__msa_ld_w(kptr + 20, 0); v4f32 _k6 = (v4f32)__msa_ld_w(kptr + 24, 0); v4f32 _k7 = (v4f32)__msa_ld_w(kptr + 28, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_r0, _k0)); _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_r0, _k1)); _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_r0, _k2)); _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_r0, _k3)); _sum4 = __msa_fadd_w(_sum4, __msa_fmul_w(_r0, _k4)); _sum5 = __msa_fadd_w(_sum5, __msa_fmul_w(_r0, _k5)); _sum6 = __msa_fadd_w(_sum6, __msa_fmul_w(_r0, _k6)); _sum7 = __msa_fadd_w(_sum7, __msa_fmul_w(_r0, _k7)); kptr += 32; _k0 = (v4f32)__msa_ld_w(kptr, 0); _k1 = (v4f32)__msa_ld_w(kptr + 4, 0); _k2 = (v4f32)__msa_ld_w(kptr + 8, 0); _k3 = (v4f32)__msa_ld_w(kptr + 12, 0); _k4 = (v4f32)__msa_ld_w(kptr + 16, 0); _k5 = (v4f32)__msa_ld_w(kptr + 20, 0); _k6 = (v4f32)__msa_ld_w(kptr + 24, 0); _k7 = (v4f32)__msa_ld_w(kptr + 28, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_r1, _k0)); _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_r1, _k1)); _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_r1, _k2)); _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_r1, _k3)); _sum4 = __msa_fadd_w(_sum4, __msa_fmul_w(_r1, _k4)); _sum5 = __msa_fadd_w(_sum5, __msa_fmul_w(_r1, _k5)); _sum6 = __msa_fadd_w(_sum6, __msa_fmul_w(_r1, _k6)); _sum7 = __msa_fadd_w(_sum7, __msa_fmul_w(_r1, _k7)); kptr += 32; _k0 = (v4f32)__msa_ld_w(kptr, 0); _k1 = (v4f32)__msa_ld_w(kptr + 4, 0); _k2 = (v4f32)__msa_ld_w(kptr + 8, 0); _k3 = (v4f32)__msa_ld_w(kptr + 12, 0); _k4 = (v4f32)__msa_ld_w(kptr + 16, 0); _k5 = (v4f32)__msa_ld_w(kptr 
+ 20, 0); _k6 = (v4f32)__msa_ld_w(kptr + 24, 0); _k7 = (v4f32)__msa_ld_w(kptr + 28, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_r2, _k0)); _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_r2, _k1)); _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_r2, _k2)); _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_r2, _k3)); _sum4 = __msa_fadd_w(_sum4, __msa_fmul_w(_r2, _k4)); _sum5 = __msa_fadd_w(_sum5, __msa_fmul_w(_r2, _k5)); _sum6 = __msa_fadd_w(_sum6, __msa_fmul_w(_r2, _k6)); _sum7 = __msa_fadd_w(_sum7, __msa_fmul_w(_r2, _k7)); kptr += 32; _k0 = (v4f32)__msa_ld_w(kptr, 0); _k1 = (v4f32)__msa_ld_w(kptr + 4, 0); _k2 = (v4f32)__msa_ld_w(kptr + 8, 0); _k3 = (v4f32)__msa_ld_w(kptr + 12, 0); _k4 = (v4f32)__msa_ld_w(kptr + 16, 0); _k5 = (v4f32)__msa_ld_w(kptr + 20, 0); _k6 = (v4f32)__msa_ld_w(kptr + 24, 0); _k7 = (v4f32)__msa_ld_w(kptr + 28, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_r3, _k0)); _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_r3, _k1)); _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_r3, _k2)); _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_r3, _k3)); _sum4 = __msa_fadd_w(_sum4, __msa_fmul_w(_r3, _k4)); _sum5 = __msa_fadd_w(_sum5, __msa_fmul_w(_r3, _k5)); _sum6 = __msa_fadd_w(_sum6, __msa_fmul_w(_r3, _k6)); _sum7 = __msa_fadd_w(_sum7, __msa_fmul_w(_r3, _k7)); kptr += 32; r0 += 16; } for (; q < inch; q++) { v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _k0 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k1 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k2 = (v4f32)__msa_ld_w(kptr + 8, 0); v4f32 _k3 = (v4f32)__msa_ld_w(kptr + 12, 0); v4f32 _k4 = (v4f32)__msa_ld_w(kptr + 16, 0); v4f32 _k5 = (v4f32)__msa_ld_w(kptr + 20, 0); v4f32 _k6 = (v4f32)__msa_ld_w(kptr + 24, 0); v4f32 _k7 = (v4f32)__msa_ld_w(kptr + 28, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_r0, _k0)); _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_r0, _k1)); _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_r0, _k2)); _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_r0, _k3)); _sum4 = __msa_fadd_w(_sum4, __msa_fmul_w(_r0, _k4)); _sum5 = __msa_fadd_w(_sum5, 
__msa_fmul_w(_r0, _k5)); _sum6 = __msa_fadd_w(_sum6, __msa_fmul_w(_r0, _k6)); _sum7 = __msa_fadd_w(_sum7, __msa_fmul_w(_r0, _k7)); kptr += 32; r0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output1_tm, 0); __msa_st_w((v4i32)_sum2, output2_tm, 0); __msa_st_w((v4i32)_sum3, output3_tm, 0); __msa_st_w((v4i32)_sum4, output4_tm, 0); __msa_st_w((v4i32)_sum5, output5_tm, 0); __msa_st_w((v4i32)_sum6, output6_tm, 0); __msa_st_w((v4i32)_sum7, output7_tm, 0); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; sum4[n] += r0[n] * kptr[n + 16]; sum5[n] += r0[n] * kptr[n + 20]; sum6[n] += r0[n] * kptr[n + 24]; sum7[n] += r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __mips_msa output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 32; const 
float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __mips_msa v4f32 _sum0 = {0.f}; v4f32 _sum1 = {0.f}; v4f32 _sum2 = {0.f}; v4f32 _sum3 = {0.f}; for (int q = 0; q < inch; q++) { v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _k0 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k1 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k2 = (v4f32)__msa_ld_w(kptr + 8, 0); v4f32 _k3 = (v4f32)__msa_ld_w(kptr + 12, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_r0, _k0)); _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_r0, _k1)); _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_r0, _k2)); _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_r0, _k3)); kptr += 16; r0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output1_tm, 0); __msa_st_w((v4i32)_sum2, output2_tm, 0); __msa_st_w((v4i32)_sum3, output3_tm, 0); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __mips_msa output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm + 36 * tiles * p; output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __mips_msa v4f32 _sum0 = {0.f}; for (int q = 0; q < inch; q++) { v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _k0 = (v4f32)__msa_ld_w(kptr, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_r0, _k0)); kptr += 16; r0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); #else float sum0[4] = {0}; for 
(int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += ( int )r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __mips_msa output0_tm += 36; } } } } // END dot // BEGIN transform output float* top_blob_bordered = NULL; if (outw_align == outw && outh_align == outh) { top_blob_bordered = top_blob; } else { top_blob_bordered = output_bordered; } { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < outch; p++) { float* out_tile = top_blob_tm + 36 * tiles * p; float* outRow0 = top_blob_bordered + outw_align * outh_align * p; float* outRow1 = outRow0 + outw_align; float* outRow2 = outRow0 + outw_align * 2; float* outRow3 = outRow0 + outw_align * 3; const float bias0 = bias ? 
bias[p] : 0.f; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { // TODO AVX2 float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; float w0[6], w1[6], w2[6], w3[6]; float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; float o0[4], o1[4], o2[4], o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] + bias0; outRow1[n] = o1[n] + bias0; outRow2[n] = o2[n] + bias0; outRow3[n] = o3[n] + bias0; } out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw_align * 3; outRow1 += outw_align * 3; outRow2 += outw_align * 3; outRow3 += outw_align * 3; } } } // END transform output if (outw_align != outw || outh_align != outw) { delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0); } } void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch) { float* kernel_tm = ( float* 
)sys_malloc(6 * 6 * inch * outch * sizeof(float)); // G const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}}; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3] = {0}; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } float* kernel_tm_test = kernel_wino; for (int r = 0; r < 9; r++) { int p = 0; for (; p + 7 < outch; p += 8) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36; const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36; const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36; const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36; float* ktmp = kernel_tm_test + p / 8 * inch * 32; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r 
* 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp[16] = kernel4[r * 4 + 0]; ktmp[17] = kernel4[r * 4 + 1]; ktmp[18] = kernel4[r * 4 + 2]; ktmp[19] = kernel4[r * 4 + 3]; ktmp[20] = kernel5[r * 4 + 0]; ktmp[21] = kernel5[r * 4 + 1]; ktmp[22] = kernel5[r * 4 + 2]; ktmp[23] = kernel5[r * 4 + 3]; ktmp[24] = kernel6[r * 4 + 0]; ktmp[25] = kernel6[r * 4 + 1]; ktmp[26] = kernel6[r * 4 + 2]; ktmp[27] = kernel6[r * 4 + 3]; ktmp[28] = kernel7[r * 4 + 0]; ktmp[29] = kernel7[r * 4 + 1]; ktmp[30] = kernel7[r * 4 + 2]; ktmp[31] = kernel7[r * 4 + 3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p + 3 < outch; p += 4) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 32; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p < outch; p++) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; float* ktmp = 
kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 32; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 36; } } kernel_tm_test += 4 * inch * outch; } free(kernel_tm); } int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int batch = input_tensor->dims[0]; int input_c = input_tensor->dims[1]; int input_h = input_tensor->dims[2]; int input_w = input_tensor->dims[3]; int output_c = output_tensor->dims[1]; int output_h = output_tensor->dims[2]; int output_w = output_tensor->dims[3]; int pad_h = param->pad_h0; int pad_w = param->pad_w0; float* kernel = ( float* )filter_tensor->data; if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor, param); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } int block_h = (output_h + TILE - 1) / TILE; int block_w = (output_w + TILE - 1) / TILE; int block = block_h * block_w; int padded_inh = TILE * block_h + 2 * pad_h; int padded_inw = TILE * block_w + 2 * pad_w; int pad_inhw = padded_inh * padded_inw; int outw = block_w * TILE; int outh = block_h * TILE; priv_info->input_pad = ( float* )sys_malloc(batch * input_c * pad_inhw * sizeof(float)); memset(priv_info->input_pad, 0, batch * input_c * pad_inhw * sizeof(float)); priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * block * output_c * sizeof(float)); priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * block * input_c * sizeof(float)); priv_info->output_bordered = NULL; if (outw != output_w || outh != output_h) { priv_info->output_bordered = ( float* )sys_malloc(outw * outh * output_c * sizeof(float)); } conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, 
output_c); return 0; } int wino_conv_hcl_postrun(struct conv_priv_info* priv_info) { if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } if (priv_info->input_pad) { sys_free(priv_info->input_pad); priv_info->input_pad = NULL; } if (priv_info->dot_block) { sys_free(priv_info->dot_block); priv_info->dot_block = NULL; } if (priv_info->transform_input) { sys_free(priv_info->transform_input); priv_info->transform_input = NULL; } if (priv_info->output_bordered) { sys_free(priv_info->output_bordered); priv_info->output_bordered = NULL; } return 0; } int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { /* param */ // printf("wino run\n"); int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = param->pad_h0; int pad_w0 = param->pad_w0; int act_type = param->activation; int group = param->group; int batch = input_tensor->dims[0]; int in_c = input_tensor->dims[1]; int in_c_g = input_tensor->dims[1] / group; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; int input_size = in_c * in_h * in_w; int input_size_g = in_c_g * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = output_tensor->dims[1]; int out_h = output_tensor->dims[2]; int out_w = output_tensor->dims[3]; int out_hw = out_h * out_w; int output_size = out_c * out_h * out_w; int out_c_align = ((out_c + 3) & -4); /* wino param */ int block_h = (out_h + TILE - 1) / TILE; int block_w = (out_w + TILE - 1) / TILE; int block_hw = block_h * block_w; int padded_in_h = block_h * TILE + 2 * pad_h0; int padded_in_w = block_w * TILE + 2 * 
pad_h0; int padded_in_hw = padded_in_h * padded_in_w; /* buffer addr */ float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* biases = NULL; if (bias_tensor != NULL) biases = ( float* )bias_tensor->data; pad_0_align_3D(priv_info->input_pad, input, in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0); for (int i = 0; i < batch; i++) { for (int g = 0; g < group; g++) { conv3x3s1_winograd43_sse(priv_info->input_pad + i * input_size + g * input_size_g, output, priv_info->interleave_buffer, priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered, biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread); } } if (act_type >= 0) { relu(output, batch * output_size, act_type); } return 0; }